From e3246922eb2b36e3b30e3b9f9fb0c6c5dcb7334f Mon Sep 17 00:00:00 2001 From: terence tsao Date: Tue, 16 Nov 2021 08:36:13 -0800 Subject: [PATCH 01/45] Add merge state type definitions (#9908) --- beacon-chain/state/types/types.go | 11 ++++--- beacon-chain/state/v3/BUILD.bazel | 16 ++++++++++ beacon-chain/state/v3/types.go | 49 +++++++++++++++++++++++++++++++ config/params/config.go | 1 + config/params/mainnet_config.go | 1 + runtime/version/fork.go | 3 ++ 6 files changed, 77 insertions(+), 4 deletions(-) create mode 100644 beacon-chain/state/v3/BUILD.bazel create mode 100644 beacon-chain/state/v3/types.go diff --git a/beacon-chain/state/types/types.go b/beacon-chain/state/types/types.go index 07adb24dc2..34feb537e3 100644 --- a/beacon-chain/state/types/types.go +++ b/beacon-chain/state/types/types.go @@ -54,12 +54,12 @@ func (f FieldIndex) String(stateVersion int) string { case Slashings: return "slashings" case PreviousEpochAttestations: - if version.Altair == stateVersion { + if version.Altair == stateVersion || version.Merge == stateVersion { return "previousEpochParticipationBits" } return "previousEpochAttestations" case CurrentEpochAttestations: - if version.Altair == stateVersion { + if version.Altair == stateVersion || version.Merge == stateVersion { return "currentEpochParticipationBits" } return "currentEpochAttestations" @@ -77,6 +77,8 @@ func (f FieldIndex) String(stateVersion int) string { return "currentSyncCommittee" case NextSyncCommittee: return "nextSyncCommittee" + case LatestExecutionPayloadHeader: + return "latestExecutionPayloadHeader" default: return "" } @@ -114,12 +116,13 @@ const ( InactivityScores CurrentSyncCommittee NextSyncCommittee + // State fields added in Merge. + LatestExecutionPayloadHeader ) // Altair fields which replaced previous phase 0 fields. const ( - // Epoch Attestations is switched with participation bits in - // Altair. + // Epoch Attestations is switched with participation bits in Altair. 
PreviousEpochParticipationBits = PreviousEpochAttestations CurrentEpochParticipationBits = CurrentEpochAttestations ) diff --git a/beacon-chain/state/v3/BUILD.bazel b/beacon-chain/state/v3/BUILD.bazel new file mode 100644 index 0000000000..8cf294f3cb --- /dev/null +++ b/beacon-chain/state/v3/BUILD.bazel @@ -0,0 +1,16 @@ +load("@prysm//tools/go:def.bzl", "go_library") + +go_library( + name = "go_default_library", + srcs = ["types.go"], + importpath = "github.com/prysmaticlabs/prysm/beacon-chain/state/v3", + visibility = ["//beacon-chain:__pkg__"], + deps = [ + "//beacon-chain/state/fieldtrie:go_default_library", + "//beacon-chain/state/stateutil:go_default_library", + "//beacon-chain/state/types:go_default_library", + "//config/params:go_default_library", + "//proto/prysm/v1alpha1:go_default_library", + "@com_github_pkg_errors//:go_default_library", + ], +) diff --git a/beacon-chain/state/v3/types.go b/beacon-chain/state/v3/types.go new file mode 100644 index 0000000000..5f83397f0a --- /dev/null +++ b/beacon-chain/state/v3/types.go @@ -0,0 +1,49 @@ +package v3 + +import ( + "sync" + + "github.com/pkg/errors" + "github.com/prysmaticlabs/prysm/beacon-chain/state/fieldtrie" + "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil" + "github.com/prysmaticlabs/prysm/beacon-chain/state/types" + "github.com/prysmaticlabs/prysm/config/params" + ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" +) + +func init() { + fieldMap = make(map[types.FieldIndex]types.DataType, params.BeaconConfig().BeaconStateMergeFieldCount) + + // Initialize the fixed sized arrays. + fieldMap[types.BlockRoots] = types.BasicArray + fieldMap[types.StateRoots] = types.BasicArray + fieldMap[types.RandaoMixes] = types.BasicArray + + // Initialize the composite arrays. + fieldMap[types.Eth1DataVotes] = types.CompositeArray + fieldMap[types.Validators] = types.CompositeArray +} + +// TODO: Add field Aliases for values from the types package. It'll come in part 2. 
+ +// fieldMap keeps track of each field +// to its corresponding data type. +var fieldMap map[types.FieldIndex]types.DataType + +// ErrNilInnerState returns when the inner state is nil and no copy set or get +// operations can be performed on state. +var ErrNilInnerState = errors.New("nil inner state") + +// BeaconState defines a struct containing utilities for the eth2 chain state, defining +// getters and setters for its respective values and helpful functions such as HashTreeRoot(). +type BeaconState struct { + state *ethpb.BeaconStateMerge + lock sync.RWMutex + dirtyFields map[types.FieldIndex]bool + dirtyIndices map[types.FieldIndex][]uint64 + stateFieldLeaves map[types.FieldIndex]*fieldtrie.FieldTrie + rebuildTrie map[types.FieldIndex]bool + valMapHandler *stateutil.ValidatorMapHandler + merkleLayers [][][]byte + sharedFieldReferences map[types.FieldIndex]*stateutil.Reference +} diff --git a/config/params/config.go b/config/params/config.go index 9a59cdd422..a99ac50dbc 100644 --- a/config/params/config.go +++ b/config/params/config.go @@ -121,6 +121,7 @@ type BeaconChainConfig struct { GenesisCountdownInterval time.Duration // How often to log the countdown until the genesis time is reached. BeaconStateFieldCount int // BeaconStateFieldCount defines how many fields are in beacon state. BeaconStateAltairFieldCount int // BeaconStateAltairFieldCount defines how many fields are in beacon state hard fork 1. + BeaconStateMergeFieldCount int // BeaconStateMergeFieldCount defines how many fields are in beacon state post upgrade to the Merge. // Slasher constants. WeakSubjectivityPeriod types.Epoch // WeakSubjectivityPeriod defines the time period expressed in number of epochs were proof of stake network should validate block headers and attestations for slashable events. 
diff --git a/config/params/mainnet_config.go b/config/params/mainnet_config.go index 97663d9ece..6d15cafba5 100644 --- a/config/params/mainnet_config.go +++ b/config/params/mainnet_config.go @@ -178,6 +178,7 @@ var mainnetBeaconConfig = &BeaconChainConfig{ PresetBase: "mainnet", BeaconStateFieldCount: 21, BeaconStateAltairFieldCount: 24, + BeaconStateMergeFieldCount: 25, // Slasher related values. WeakSubjectivityPeriod: 54000, diff --git a/runtime/version/fork.go b/runtime/version/fork.go index 7a15f1032a..311c47c23f 100644 --- a/runtime/version/fork.go +++ b/runtime/version/fork.go @@ -3,6 +3,7 @@ package version const ( Phase0 = iota Altair + Merge ) func String(version int) string { @@ -11,6 +12,8 @@ func String(version int) string { return "phase0" case Altair: return "altair" + case Merge: + return "merge" default: return "unknown version" } From 6f4c80531c13c065ad56301e7e95102c25735b1c Mon Sep 17 00:00:00 2001 From: terence tsao Date: Wed, 17 Nov 2021 01:04:49 -0800 Subject: [PATCH 02/45] Add field roots for beacon state v3 (#9914) * Add field roots for beacon state * Update BUILD.bazel * Adding an exception for state v3 * fix deadcode Co-authored-by: nisdas --- beacon-chain/state/v3/BUILD.bazel | 22 +- beacon-chain/state/v3/field_root_eth1.go | 59 +++++ beacon-chain/state/v3/field_root_test.go | 23 ++ beacon-chain/state/v3/field_root_validator.go | 89 +++++++ beacon-chain/state/v3/field_root_vector.go | 146 +++++++++++ beacon-chain/state/v3/field_roots.go | 226 ++++++++++++++++++ 6 files changed, 563 insertions(+), 2 deletions(-) create mode 100644 beacon-chain/state/v3/field_root_eth1.go create mode 100644 beacon-chain/state/v3/field_root_test.go create mode 100644 beacon-chain/state/v3/field_root_validator.go create mode 100644 beacon-chain/state/v3/field_root_vector.go create mode 100644 beacon-chain/state/v3/field_roots.go diff --git a/beacon-chain/state/v3/BUILD.bazel b/beacon-chain/state/v3/BUILD.bazel index 8cf294f3cb..f53886ab40 100644 --- 
a/beacon-chain/state/v3/BUILD.bazel +++ b/beacon-chain/state/v3/BUILD.bazel @@ -1,16 +1,34 @@ -load("@prysm//tools/go:def.bzl", "go_library") +load("@prysm//tools/go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", - srcs = ["types.go"], + srcs = [ + "field_root_eth1.go", + "field_root_validator.go", + "field_root_vector.go", + "field_roots.go", + "types.go", + ], importpath = "github.com/prysmaticlabs/prysm/beacon-chain/state/v3", visibility = ["//beacon-chain:__pkg__"], deps = [ "//beacon-chain/state/fieldtrie:go_default_library", "//beacon-chain/state/stateutil:go_default_library", "//beacon-chain/state/types:go_default_library", + "//config/features:go_default_library", "//config/params:go_default_library", + "//crypto/hash:go_default_library", + "//encoding/bytesutil:go_default_library", + "//encoding/ssz:go_default_library", "//proto/prysm/v1alpha1:go_default_library", + "@com_github_dgraph_io_ristretto//:go_default_library", "@com_github_pkg_errors//:go_default_library", ], ) + +go_test( + name = "go_default_test", + srcs = ["field_root_test.go"], + embed = [":go_default_library"], + deps = ["//testing/assert:go_default_library"], +) diff --git a/beacon-chain/state/v3/field_root_eth1.go b/beacon-chain/state/v3/field_root_eth1.go new file mode 100644 index 0000000000..a02804484e --- /dev/null +++ b/beacon-chain/state/v3/field_root_eth1.go @@ -0,0 +1,59 @@ +package v3 + +import ( + "github.com/pkg/errors" + "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil" + "github.com/prysmaticlabs/prysm/config/features" + "github.com/prysmaticlabs/prysm/encoding/ssz" + ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" +) + +// eth1Root computes the HashTreeRoot Merkleization of +// a BeaconBlockHeader struct according to the eth2 +// Simple Serialize specification. 
+func eth1Root(hasher ssz.HashFn, eth1Data *ethpb.Eth1Data) ([32]byte, error) { + if eth1Data == nil { + return [32]byte{}, errors.New("nil eth1 data") + } + + enc := stateutil.Eth1DataEncKey(eth1Data) + if features.Get().EnableSSZCache { + if found, ok := cachedHasher.rootsCache.Get(string(enc)); ok && found != nil { + return found.([32]byte), nil + } + } + + root, err := stateutil.Eth1DataRootWithHasher(hasher, eth1Data) + if err != nil { + return [32]byte{}, err + } + + if features.Get().EnableSSZCache { + cachedHasher.rootsCache.Set(string(enc), root, 32) + } + return root, nil +} + +// eth1DataVotesRoot computes the HashTreeRoot Merkleization of +// a list of Eth1Data structs according to the eth2 +// Simple Serialize specification. +func eth1DataVotesRoot(eth1DataVotes []*ethpb.Eth1Data) ([32]byte, error) { + hashKey, err := stateutil.Eth1DatasEncKey(eth1DataVotes) + if err != nil { + return [32]byte{}, err + } + + if features.Get().EnableSSZCache { + if found, ok := cachedHasher.rootsCache.Get(string(hashKey[:])); ok && found != nil { + return found.([32]byte), nil + } + } + root, err := stateutil.Eth1DatasRoot(eth1DataVotes) + if err != nil { + return [32]byte{}, err + } + if features.Get().EnableSSZCache { + cachedHasher.rootsCache.Set(string(hashKey[:]), root, 32) + } + return root, nil +} diff --git a/beacon-chain/state/v3/field_root_test.go b/beacon-chain/state/v3/field_root_test.go new file mode 100644 index 0000000000..54c8feda18 --- /dev/null +++ b/beacon-chain/state/v3/field_root_test.go @@ -0,0 +1,23 @@ +package v3 + +import ( + "testing" + + "github.com/prysmaticlabs/prysm/testing/assert" +) + +func TestArraysTreeRoot_OnlyPowerOf2(t *testing.T) { + _, err := nocachedHasher.arraysRoot([][]byte{}, 1, "testing") + assert.NoError(t, err) + _, err = nocachedHasher.arraysRoot([][]byte{}, 4, "testing") + assert.NoError(t, err) + _, err = nocachedHasher.arraysRoot([][]byte{}, 8, "testing") + assert.NoError(t, err) + _, err = 
nocachedHasher.arraysRoot([][]byte{}, 10, "testing") + assert.ErrorContains(t, "hash layer is a non power of 2", err) +} + +func TestArraysTreeRoot_ZeroLength(t *testing.T) { + _, err := nocachedHasher.arraysRoot([][]byte{}, 0, "testing") + assert.ErrorContains(t, "zero leaves provided", err) +} diff --git a/beacon-chain/state/v3/field_root_validator.go b/beacon-chain/state/v3/field_root_validator.go new file mode 100644 index 0000000000..2f5c96a1b2 --- /dev/null +++ b/beacon-chain/state/v3/field_root_validator.go @@ -0,0 +1,89 @@ +package v3 + +import ( + "bytes" + "encoding/binary" + + "github.com/pkg/errors" + "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil" + "github.com/prysmaticlabs/prysm/config/features" + "github.com/prysmaticlabs/prysm/config/params" + "github.com/prysmaticlabs/prysm/crypto/hash" + "github.com/prysmaticlabs/prysm/encoding/ssz" + ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" +) + +func (h *stateRootHasher) validatorRegistryRoot(validators []*ethpb.Validator) ([32]byte, error) { + hashKeyElements := make([]byte, len(validators)*32) + roots := make([][32]byte, len(validators)) + emptyKey := hash.FastSum256(hashKeyElements) + hasher := hash.CustomSHA256Hasher() + bytesProcessed := 0 + for i := 0; i < len(validators); i++ { + val, err := h.validatorRoot(hasher, validators[i]) + if err != nil { + return [32]byte{}, errors.Wrap(err, "could not compute validators merkleization") + } + copy(hashKeyElements[bytesProcessed:bytesProcessed+32], val[:]) + roots[i] = val + bytesProcessed += 32 + } + + hashKey := hash.FastSum256(hashKeyElements) + if hashKey != emptyKey && h.rootsCache != nil { + if found, ok := h.rootsCache.Get(string(hashKey[:])); found != nil && ok { + return found.([32]byte), nil + } + } + + validatorsRootsRoot, err := ssz.BitwiseMerkleizeArrays(hasher, roots, uint64(len(roots)), params.BeaconConfig().ValidatorRegistryLimit) + if err != nil { + return [32]byte{}, errors.Wrap(err, "could not compute validator 
registry merkleization") + } + validatorsRootsBuf := new(bytes.Buffer) + if err := binary.Write(validatorsRootsBuf, binary.LittleEndian, uint64(len(validators))); err != nil { + return [32]byte{}, errors.Wrap(err, "could not marshal validator registry length") + } + // We need to mix in the length of the slice. + var validatorsRootsBufRoot [32]byte + copy(validatorsRootsBufRoot[:], validatorsRootsBuf.Bytes()) + res := ssz.MixInLength(validatorsRootsRoot, validatorsRootsBufRoot[:]) + if hashKey != emptyKey && h.rootsCache != nil { + h.rootsCache.Set(string(hashKey[:]), res, 32) + } + return res, nil +} + +func (h *stateRootHasher) validatorRoot(hasher ssz.HashFn, validator *ethpb.Validator) ([32]byte, error) { + if validator == nil { + return [32]byte{}, errors.New("nil validator") + } + + enc := stateutil.ValidatorEncKey(validator) + // Check if it exists in cache: + if h.rootsCache != nil { + if found, ok := h.rootsCache.Get(string(enc)); found != nil && ok { + return found.([32]byte), nil + } + } + + valRoot, err := stateutil.ValidatorRootWithHasher(hasher, validator) + if err != nil { + return [32]byte{}, err + } + + if h.rootsCache != nil { + h.rootsCache.Set(string(enc), valRoot, 32) + } + return valRoot, nil +} + +// ValidatorRegistryRoot computes the HashTreeRoot Merkleization of +// a list of validator structs according to the eth2 +// Simple Serialize specification. 
+func ValidatorRegistryRoot(vals []*ethpb.Validator) ([32]byte, error) { + if features.Get().EnableSSZCache { + return cachedHasher.validatorRegistryRoot(vals) + } + return nocachedHasher.validatorRegistryRoot(vals) +} diff --git a/beacon-chain/state/v3/field_root_vector.go b/beacon-chain/state/v3/field_root_vector.go new file mode 100644 index 0000000000..7654d45e3d --- /dev/null +++ b/beacon-chain/state/v3/field_root_vector.go @@ -0,0 +1,146 @@ +package v3 + +import ( + "github.com/pkg/errors" + "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil" + "github.com/prysmaticlabs/prysm/crypto/hash" + "github.com/prysmaticlabs/prysm/encoding/ssz" +) + +func (h *stateRootHasher) arraysRoot(input [][]byte, length uint64, fieldName string) ([32]byte, error) { + lock.Lock() + defer lock.Unlock() + hashFunc := hash.CustomSHA256Hasher() + if _, ok := layersCache[fieldName]; !ok && h.rootsCache != nil { + depth := ssz.Depth(length) + layersCache[fieldName] = make([][][32]byte, depth+1) + } + + leaves := make([][32]byte, length) + for i, chunk := range input { + copy(leaves[i][:], chunk) + } + bytesProcessed := 0 + changedIndices := make([]int, 0) + prevLeaves, ok := leavesCache[fieldName] + if len(prevLeaves) == 0 || h.rootsCache == nil { + prevLeaves = leaves + } + + for i := 0; i < len(leaves); i++ { + // We check if any items changed since the roots were last recomputed. + notEqual := leaves[i] != prevLeaves[i] + if ok && h.rootsCache != nil && notEqual { + changedIndices = append(changedIndices, i) + } + bytesProcessed += 32 + } + if len(changedIndices) > 0 && h.rootsCache != nil { + var rt [32]byte + var err error + // If indices did change since last computation, we only recompute + // the modified branches in the cached Merkle tree for this state field. + chunks := leaves + + // We need to ensure we recompute indices of the Merkle tree which + // changed in-between calls to this function. 
This check adds an offset + // to the recomputed indices to ensure we do so evenly. + maxChangedIndex := changedIndices[len(changedIndices)-1] + if maxChangedIndex+2 == len(chunks) && maxChangedIndex%2 != 0 { + changedIndices = append(changedIndices, maxChangedIndex+1) + } + for i := 0; i < len(changedIndices); i++ { + rt, err = recomputeRoot(changedIndices[i], chunks, fieldName, hashFunc) + if err != nil { + return [32]byte{}, err + } + } + leavesCache[fieldName] = chunks + return rt, nil + } + + res, err := h.merkleizeWithCache(leaves, length, fieldName, hashFunc) + if err != nil { + return [32]byte{}, err + } + if h.rootsCache != nil { + leavesCache[fieldName] = leaves + } + return res, nil +} + +func recomputeRoot(idx int, chunks [][32]byte, fieldName string, hasher func([]byte) [32]byte) ([32]byte, error) { + items, ok := layersCache[fieldName] + if !ok { + return [32]byte{}, errors.New("could not recompute root as there was no cache found") + } + if items == nil { + return [32]byte{}, errors.New("could not recompute root as there were no items found in the layers cache") + } + layers := items + root := chunks[idx] + layers[0] = chunks + // The merkle tree structure looks as follows: + // [[r1, r2, r3, r4], [parent1, parent2], [root]] + // Using information about the index which changed, idx, we recompute + // only its branch up the tree. + currentIndex := idx + for i := 0; i < len(layers)-1; i++ { + isLeft := currentIndex%2 == 0 + neighborIdx := currentIndex ^ 1 + + neighbor := [32]byte{} + if layers[i] != nil && len(layers[i]) != 0 && neighborIdx < len(layers[i]) { + neighbor = layers[i][neighborIdx] + } + if isLeft { + parentHash := hasher(append(root[:], neighbor[:]...)) + root = parentHash + } else { + parentHash := hasher(append(neighbor[:], root[:]...)) + root = parentHash + } + parentIdx := currentIndex / 2 + // Update the cached layers at the parent index. 
+ if len(layers[i+1]) == 0 { + layers[i+1] = append(layers[i+1], root) + } else { + layers[i+1][parentIdx] = root + } + currentIndex = parentIdx + } + layersCache[fieldName] = layers + // If there is only a single leaf, we return it (the identity element). + if len(layers[0]) == 1 { + return layers[0][0], nil + } + return root, nil +} + +func (h *stateRootHasher) merkleizeWithCache(leaves [][32]byte, length uint64, + fieldName string, hasher func([]byte) [32]byte) ([32]byte, error) { + if len(leaves) == 0 { + return [32]byte{}, errors.New("zero leaves provided") + } + if len(leaves) == 1 { + return leaves[0], nil + } + hashLayer := leaves + layers := make([][][32]byte, ssz.Depth(length)+1) + if items, ok := layersCache[fieldName]; ok && h.rootsCache != nil { + if len(items[0]) == len(leaves) { + layers = items + } + } + layers[0] = hashLayer + var err error + layers, hashLayer, err = stateutil.MerkleizeTrieLeaves(layers, hashLayer, hasher) + if err != nil { + return [32]byte{}, err + } + root := hashLayer[0] + if h.rootsCache != nil { + layersCache[fieldName] = layers + } + return root, nil +} diff --git a/beacon-chain/state/v3/field_roots.go b/beacon-chain/state/v3/field_roots.go new file mode 100644 index 0000000000..827dcb3f2a --- /dev/null +++ b/beacon-chain/state/v3/field_roots.go @@ -0,0 +1,226 @@ +package v3 + +import ( + "encoding/binary" + "sync" + + "github.com/dgraph-io/ristretto" + "github.com/pkg/errors" + "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil" + "github.com/prysmaticlabs/prysm/config/features" + "github.com/prysmaticlabs/prysm/config/params" + "github.com/prysmaticlabs/prysm/crypto/hash" + "github.com/prysmaticlabs/prysm/encoding/bytesutil" + "github.com/prysmaticlabs/prysm/encoding/ssz" + ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" +) + +var ( + leavesCache = make(map[string][][32]byte, params.BeaconConfig().BeaconStateMergeFieldCount) + layersCache = make(map[string][][][32]byte, 
params.BeaconConfig().BeaconStateMergeFieldCount) + lock sync.RWMutex +) + +const cacheSize = 100000 + +var nocachedHasher *stateRootHasher +var cachedHasher *stateRootHasher + +func init() { + rootsCache, err := ristretto.NewCache(&ristretto.Config{ + NumCounters: cacheSize, // number of keys to track frequency of (1M). + MaxCost: 1 << 22, // maximum cost of cache (3MB). + // 100,000 roots will take up approximately 3 MB in memory. + BufferItems: 64, // number of keys per Get buffer. + }) + if err != nil { + panic(err) + } + // Temporarily disable roots cache until cache issues can be resolved. + cachedHasher = &stateRootHasher{rootsCache: rootsCache} + nocachedHasher = &stateRootHasher{} +} + +type stateRootHasher struct { + rootsCache *ristretto.Cache +} + +// computeFieldRoots returns the hash tree root computations of every field in +// the beacon state as a list of 32 byte roots. +//nolint:deadcode +func computeFieldRoots(state *ethpb.BeaconStateMerge) ([][]byte, error) { + if features.Get().EnableSSZCache { + return cachedHasher.computeFieldRootsWithHasher(state) + } + return nocachedHasher.computeFieldRootsWithHasher(state) +} + +func (h *stateRootHasher) computeFieldRootsWithHasher(state *ethpb.BeaconStateMerge) ([][]byte, error) { + if state == nil { + return nil, errors.New("nil state") + } + hasher := hash.CustomSHA256Hasher() + fieldRoots := make([][]byte, params.BeaconConfig().BeaconStateMergeFieldCount) + + // Genesis time root. + genesisRoot := ssz.Uint64Root(state.GenesisTime) + fieldRoots[0] = genesisRoot[:] + + // Genesis validator root. + r := [32]byte{} + copy(r[:], state.GenesisValidatorsRoot) + fieldRoots[1] = r[:] + + // Slot root. + slotRoot := ssz.Uint64Root(uint64(state.Slot)) + fieldRoots[2] = slotRoot[:] + + // Fork data structure root. 
+ forkHashTreeRoot, err := ssz.ForkRoot(state.Fork) + if err != nil { + return nil, errors.Wrap(err, "could not compute fork merkleization") + } + fieldRoots[3] = forkHashTreeRoot[:] + + // BeaconBlockHeader data structure root. + headerHashTreeRoot, err := stateutil.BlockHeaderRoot(state.LatestBlockHeader) + if err != nil { + return nil, errors.Wrap(err, "could not compute block header merkleization") + } + fieldRoots[4] = headerHashTreeRoot[:] + + // BlockRoots array root. + blockRootsRoot, err := h.arraysRoot(state.BlockRoots, uint64(params.BeaconConfig().SlotsPerHistoricalRoot), "BlockRoots") + if err != nil { + return nil, errors.Wrap(err, "could not compute block roots merkleization") + } + fieldRoots[5] = blockRootsRoot[:] + + // StateRoots array root. + stateRootsRoot, err := h.arraysRoot(state.StateRoots, uint64(params.BeaconConfig().SlotsPerHistoricalRoot), "StateRoots") + if err != nil { + return nil, errors.Wrap(err, "could not compute state roots merkleization") + } + fieldRoots[6] = stateRootsRoot[:] + + // HistoricalRoots slice root. + historicalRootsRt, err := ssz.ByteArrayRootWithLimit(state.HistoricalRoots, params.BeaconConfig().HistoricalRootsLimit) + if err != nil { + return nil, errors.Wrap(err, "could not compute historical roots merkleization") + } + fieldRoots[7] = historicalRootsRt[:] + + // Eth1Data data structure root. + eth1HashTreeRoot, err := eth1Root(hasher, state.Eth1Data) + if err != nil { + return nil, errors.Wrap(err, "could not compute eth1data merkleization") + } + fieldRoots[8] = eth1HashTreeRoot[:] + + // Eth1DataVotes slice root. + eth1VotesRoot, err := eth1DataVotesRoot(state.Eth1DataVotes) + if err != nil { + return nil, errors.Wrap(err, "could not compute eth1data votes merkleization") + } + fieldRoots[9] = eth1VotesRoot[:] + + // Eth1DepositIndex root. 
+ eth1DepositIndexBuf := make([]byte, 8) + binary.LittleEndian.PutUint64(eth1DepositIndexBuf, state.Eth1DepositIndex) + eth1DepositBuf := bytesutil.ToBytes32(eth1DepositIndexBuf) + fieldRoots[10] = eth1DepositBuf[:] + + // Validators slice root. + validatorsRoot, err := h.validatorRegistryRoot(state.Validators) + if err != nil { + return nil, errors.Wrap(err, "could not compute validator registry merkleization") + } + fieldRoots[11] = validatorsRoot[:] + + // Balances slice root. + balancesRoot, err := stateutil.Uint64ListRootWithRegistryLimit(state.Balances) + if err != nil { + return nil, errors.Wrap(err, "could not compute validator balances merkleization") + } + fieldRoots[12] = balancesRoot[:] + + // RandaoMixes array root. + randaoRootsRoot, err := h.arraysRoot(state.RandaoMixes, uint64(params.BeaconConfig().EpochsPerHistoricalVector), "RandaoMixes") + if err != nil { + return nil, errors.Wrap(err, "could not compute randao roots merkleization") + } + fieldRoots[13] = randaoRootsRoot[:] + + // Slashings array root. + slashingsRootsRoot, err := ssz.SlashingsRoot(state.Slashings) + if err != nil { + return nil, errors.Wrap(err, "could not compute slashings merkleization") + } + fieldRoots[14] = slashingsRootsRoot[:] + + // PreviousEpochParticipation slice root. + prevParticipationRoot, err := stateutil.ParticipationBitsRoot(state.PreviousEpochParticipation) + if err != nil { + return nil, errors.Wrap(err, "could not compute previous epoch participation merkleization") + } + fieldRoots[15] = prevParticipationRoot[:] + + // CurrentEpochParticipation slice root. + currParticipationRoot, err := stateutil.ParticipationBitsRoot(state.CurrentEpochParticipation) + if err != nil { + return nil, errors.Wrap(err, "could not compute current epoch participation merkleization") + } + fieldRoots[16] = currParticipationRoot[:] + + // JustificationBits root. 
+ justifiedBitsRoot := bytesutil.ToBytes32(state.JustificationBits) + fieldRoots[17] = justifiedBitsRoot[:] + + // PreviousJustifiedCheckpoint data structure root. + prevCheckRoot, err := ssz.CheckpointRoot(hasher, state.PreviousJustifiedCheckpoint) + if err != nil { + return nil, errors.Wrap(err, "could not compute previous justified checkpoint merkleization") + } + fieldRoots[18] = prevCheckRoot[:] + + // CurrentJustifiedCheckpoint data structure root. + currJustRoot, err := ssz.CheckpointRoot(hasher, state.CurrentJustifiedCheckpoint) + if err != nil { + return nil, errors.Wrap(err, "could not compute current justified checkpoint merkleization") + } + fieldRoots[19] = currJustRoot[:] + + // FinalizedCheckpoint data structure root. + finalRoot, err := ssz.CheckpointRoot(hasher, state.FinalizedCheckpoint) + if err != nil { + return nil, errors.Wrap(err, "could not compute finalized checkpoint merkleization") + } + fieldRoots[20] = finalRoot[:] + + // Inactivity scores root. + inactivityScoresRoot, err := stateutil.Uint64ListRootWithRegistryLimit(state.InactivityScores) + if err != nil { + return nil, errors.Wrap(err, "could not compute inactivityScoreRoot") + } + fieldRoots[21] = inactivityScoresRoot[:] + + // Current sync committee root. + currentSyncCommitteeRoot, err := stateutil.SyncCommitteeRoot(state.CurrentSyncCommittee) + if err != nil { + return nil, errors.Wrap(err, "could not compute sync committee merkleization") + } + fieldRoots[22] = currentSyncCommitteeRoot[:] + + // Next sync committee root. + nextSyncCommitteeRoot, err := stateutil.SyncCommitteeRoot(state.NextSyncCommittee) + if err != nil { + return nil, errors.Wrap(err, "could not compute sync committee merkleization") + } + fieldRoots[23] = nextSyncCommitteeRoot[:] + + // Execution payload root. 
+ //TODO: Blocked by https://github.com/ferranbt/fastssz/pull/65 + fieldRoots[24] = []byte{} + + return fieldRoots, nil +} From 1c4ea75a1868b5efff83ce719b6a80b317ceb77c Mon Sep 17 00:00:00 2001 From: Nishant Das Date: Wed, 17 Nov 2021 23:05:50 +0800 Subject: [PATCH 03/45] Prevent Reprocessing of a Block From Our Pending Queue (#9904) * fix bugs * test * raul's review --- beacon-chain/sync/pending_blocks_queue.go | 29 +++++++--- .../sync/pending_blocks_queue_test.go | 56 +++++++++++++++++-- 2 files changed, 72 insertions(+), 13 deletions(-) diff --git a/beacon-chain/sync/pending_blocks_queue.go b/beacon-chain/sync/pending_blocks_queue.go index be75f2ca01..0daf892d0b 100644 --- a/beacon-chain/sync/pending_blocks_queue.go +++ b/beacon-chain/sync/pending_blocks_queue.go @@ -88,16 +88,29 @@ func (s *Service) processPendingBlocks(ctx context.Context) error { continue } - s.pendingQueueLock.RLock() - inPendingQueue := s.seenPendingBlocks[bytesutil.ToBytes32(b.Block().ParentRoot())] - s.pendingQueueLock.RUnlock() - blkRoot, err := b.Block().HashTreeRoot() if err != nil { tracing.AnnotateError(span, err) span.End() return err } + inDB := s.cfg.beaconDB.HasBlock(ctx, blkRoot) + // No need to process the same block twice. + if inDB { + s.pendingQueueLock.Lock() + if err := s.deleteBlockFromPendingQueue(slot, b, blkRoot); err != nil { + s.pendingQueueLock.Unlock() + return err + } + s.pendingQueueLock.Unlock() + span.End() + continue + } + + s.pendingQueueLock.RLock() + inPendingQueue := s.seenPendingBlocks[bytesutil.ToBytes32(b.Block().ParentRoot())] + s.pendingQueueLock.RUnlock() + parentIsBad := s.hasBadBlock(bytesutil.ToBytes32(b.Block().ParentRoot())) blockIsBad := s.hasBadBlock(blkRoot) // Check if parent is a bad block. 
@@ -117,12 +130,12 @@ func (s *Service) processPendingBlocks(ctx context.Context) error { continue } - inDB := s.cfg.beaconDB.HasBlock(ctx, bytesutil.ToBytes32(b.Block().ParentRoot())) + parentInDb := s.cfg.beaconDB.HasBlock(ctx, bytesutil.ToBytes32(b.Block().ParentRoot())) hasPeer := len(pids) != 0 // Only request for missing parent block if it's not in beaconDB, not in pending cache // and has peer in the peer list. - if !inPendingQueue && !inDB && hasPeer { + if !inPendingQueue && !parentInDb && hasPeer { log.WithFields(logrus.Fields{ "currentSlot": b.Block().Slot(), "parentRoot": hex.EncodeToString(bytesutil.Trunc(b.Block().ParentRoot())), @@ -133,7 +146,7 @@ func (s *Service) processPendingBlocks(ctx context.Context) error { continue } - if !inDB { + if !parentInDb { span.End() continue } @@ -167,6 +180,7 @@ func (s *Service) processPendingBlocks(ctx context.Context) error { s.pendingQueueLock.Lock() if err := s.deleteBlockFromPendingQueue(slot, b, blkRoot); err != nil { + s.pendingQueueLock.Unlock() return err } s.pendingQueueLock.Unlock() @@ -321,6 +335,7 @@ func (s *Service) deleteBlockFromPendingQueue(slot types.Slot, b block.SignedBea } if len(newBlks) == 0 { s.slotToPendingBlocks.Delete(slotToCacheKey(slot)) + delete(s.seenPendingBlocks, r) return nil } diff --git a/beacon-chain/sync/pending_blocks_queue_test.go b/beacon-chain/sync/pending_blocks_queue_test.go index 05aee92848..6110ebad43 100644 --- a/beacon-chain/sync/pending_blocks_queue_test.go +++ b/beacon-chain/sync/pending_blocks_queue_test.go @@ -84,9 +84,13 @@ func TestRegularSyncBeaconBlockSubscriber_ProcessPendingBlocks1(t *testing.T) { require.NoError(t, r.insertBlockToPendingQueue(b1.Block.Slot, wrapper.WrappedPhase0SignedBeaconBlock(b1), b1Root)) require.NoError(t, r.cfg.beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b1))) - // Insert bad b1 in the cache to verify the good one doesn't get replaced. 
- require.NoError(t, r.insertBlockToPendingQueue(b1.Block.Slot, wrapper.WrappedPhase0SignedBeaconBlock(util.NewBeaconBlock()), [32]byte{})) + nBlock := util.NewBeaconBlock() + nBlock.Block.Slot = b1.Block.Slot + nRoot, err := nBlock.Block.HashTreeRoot() + require.NoError(t, err) + // Insert bad b1 in the cache to verify the good one doesn't get replaced. + require.NoError(t, r.insertBlockToPendingQueue(nBlock.Block.Slot, wrapper.WrappedPhase0SignedBeaconBlock(nBlock), nRoot)) require.NoError(t, r.processPendingBlocks(context.Background())) // Marks a block as bad require.NoError(t, r.processPendingBlocks(context.Background())) // Bad block removed on second run @@ -140,6 +144,46 @@ func TestRegularSync_InsertDuplicateBlocks(t *testing.T) { } +func TestRegularSyncBeaconBlockSubscriber_DoNotReprocessBlock(t *testing.T) { + db := dbtest.SetupDB(t) + + p1 := p2ptest.NewTestP2P(t) + r := &Service{ + cfg: &config{ + p2p: p1, + beaconDB: db, + chain: &mock.ChainService{ + FinalizedCheckPoint: ðpb.Checkpoint{ + Epoch: 0, + }, + }, + stateGen: stategen.New(db), + }, + slotToPendingBlocks: gcache.New(time.Second, 2*time.Second), + seenPendingBlocks: make(map[[32]byte]bool), + } + r.initCaches() + + b0 := util.NewBeaconBlock() + require.NoError(t, r.cfg.beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b0))) + b0Root, err := b0.Block.HashTreeRoot() + require.NoError(t, err) + b3 := util.NewBeaconBlock() + b3.Block.Slot = 3 + b3.Block.ParentRoot = b0Root[:] + b3Root, err := b3.Block.HashTreeRoot() + require.NoError(t, err) + + require.NoError(t, r.cfg.beaconDB.SaveBlock(context.Background(), wrapper.WrappedPhase0SignedBeaconBlock(b3))) + + // Add b3 to the cache + require.NoError(t, r.insertBlockToPendingQueue(b3.Block.Slot, wrapper.WrappedPhase0SignedBeaconBlock(b3), b3Root)) + + require.NoError(t, r.processPendingBlocks(context.Background())) + assert.Equal(t, 0, len(r.slotToPendingBlocks.Items()), "Incorrect size for slot to pending blocks 
cache") + assert.Equal(t, 0, len(r.seenPendingBlocks), "Incorrect size for seen pending block") +} + // /- b1 - b2 - b5 // b0 // \- b3 - b4 @@ -237,7 +281,7 @@ func TestRegularSyncBeaconBlockSubscriber_ProcessPendingBlocks_2Chains(t *testin require.NoError(t, r.processPendingBlocks(context.Background())) // Bad block removed on second run assert.Equal(t, 1, len(r.slotToPendingBlocks.Items()), "Incorrect size for slot to pending blocks cache") - assert.Equal(t, 3, len(r.seenPendingBlocks), "Incorrect size for seen pending block") + assert.Equal(t, 1, len(r.seenPendingBlocks), "Incorrect size for seen pending block") // Add b2 to the cache require.NoError(t, r.insertBlockToPendingQueue(b2.Block.Slot, wrapper.WrappedPhase0SignedBeaconBlock(b2), b2Root)) @@ -248,7 +292,7 @@ func TestRegularSyncBeaconBlockSubscriber_ProcessPendingBlocks_2Chains(t *testin require.NoError(t, r.processPendingBlocks(context.Background())) // Bad block removed on second run assert.Equal(t, 0, len(r.slotToPendingBlocks.Items()), "Incorrect size for slot to pending blocks cache") - assert.Equal(t, 4, len(r.seenPendingBlocks), "Incorrect size for seen pending block") + assert.Equal(t, 0, len(r.seenPendingBlocks), "Incorrect size for seen pending block") } func TestRegularSyncBeaconBlockSubscriber_PruneOldPendingBlocks(t *testing.T) { @@ -318,7 +362,7 @@ func TestRegularSyncBeaconBlockSubscriber_PruneOldPendingBlocks(t *testing.T) { require.NoError(t, r.processPendingBlocks(context.Background())) assert.Equal(t, 0, len(r.slotToPendingBlocks.Items()), "Incorrect size for slot to pending blocks cache") - assert.Equal(t, 4, len(r.seenPendingBlocks), "Incorrect size for seen pending block") + assert.Equal(t, 0, len(r.seenPendingBlocks), "Incorrect size for seen pending block") } func TestService_sortedPendingSlots(t *testing.T) { @@ -429,7 +473,7 @@ func TestService_BatchRootRequest(t *testing.T) { assert.Equal(t, 4, len(r.seenPendingBlocks), "Incorrect size for seen pending block") } -func 
TestService_AddPeningBlockToQueueOverMax(t *testing.T) { +func TestService_AddPendingBlockToQueueOverMax(t *testing.T) { r := &Service{ slotToPendingBlocks: gcache.New(time.Second, 2*time.Second), seenPendingBlocks: make(map[[32]byte]bool), From a78cdf86cc4e614ced18aa7af32c89b4372657c7 Mon Sep 17 00:00:00 2001 From: Raul Jordan Date: Wed, 17 Nov 2021 10:39:43 -0500 Subject: [PATCH 04/45] Warn Users if Slashing Protection History is Empty (#9909) * warning in case not found * better comment * fatal --- cmd/validator/slashing-protection/export.go | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/cmd/validator/slashing-protection/export.go b/cmd/validator/slashing-protection/export.go index fb999e278a..a3eee73856 100644 --- a/cmd/validator/slashing-protection/export.go +++ b/cmd/validator/slashing-protection/export.go @@ -67,6 +67,22 @@ func exportSlashingProtectionJSON(cliCtx *cli.Context) error { if err != nil { return errors.Wrap(err, "could not export slashing protection history") } + + // Check if JSON data is empty and issue a warning about common problems to the user. + if eipJSON == nil || len(eipJSON.Data) == 0 { + log.Fatalf( + "No slashing protection data was found in the %s directory. This is either because (1) your "+ + "validator client does not have any history of blocks or attestations yet, or (2) "+ + "this may be because your validator database is stored in a --datadir which could be a different "+ + "directory than the --wallet-dir you set when running your validator. You might have an empty "+ + "validator.db in your --wallet-dir that was created by accident in a previous Prysm version. "+ + "Check if the directory you are passing in has a validator.db file in it and then run "+ + "the command with right directory. 
Also, check if your --wallet-dir is different from your --datadir "+ + "when you ran your Prysm validator", + dataDir, + ) + } + outputDir, err := userprompt.InputDirectory( cliCtx, "Enter your desired output directory for your slashing protection history file", From 9b374187618282d63c215b8c0a8464472db63061 Mon Sep 17 00:00:00 2001 From: Raul Jordan Date: Thu, 18 Nov 2021 15:49:19 -0500 Subject: [PATCH 05/45] Warn Users In Case Slashing Protection Exports are Empty (#9919) * export text * Update cmd/validator/slashing-protection/export.go Co-authored-by: Potuz Co-authored-by: Potuz --- cmd/validator/slashing-protection/export.go | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/cmd/validator/slashing-protection/export.go b/cmd/validator/slashing-protection/export.go index a3eee73856..7b08821d9e 100644 --- a/cmd/validator/slashing-protection/export.go +++ b/cmd/validator/slashing-protection/export.go @@ -70,16 +70,12 @@ func exportSlashingProtectionJSON(cliCtx *cli.Context) error { // Check if JSON data is empty and issue a warning about common problems to the user. if eipJSON == nil || len(eipJSON.Data) == 0 { - log.Fatalf( - "No slashing protection data was found in the %s directory. This is either because (1) your "+ - "validator client does not have any history of blocks or attestations yet, or (2) "+ - "this may be because your validator database is stored in a --datadir which could be a different "+ - "directory than the --wallet-dir you set when running your validator. You might have an empty "+ - "validator.db in your --wallet-dir that was created by accident in a previous Prysm version. "+ - "Check if the directory you are passing in has a validator.db file in it and then run "+ - "the command with right directory. Also, check if your --wallet-dir is different from your --datadir "+ - "when you ran your Prysm validator", - dataDir, + log.Fatal( + "No slashing protection data was found in your database. 
This is likely because an older version of " + + "Prysm would place your validator database in your wallet directory as a validator.db file. Now, " + + "Prysm keeps its validator database inside the direct/ or derived/ folder in your wallet directory. " + + "Try running this command again, but add direct/ or derived/ to the path where your wallet " + + "directory is in and you should obtain your slashing protection history", ) } From cae58bbbd838b7553647750b6f5257a1ac24868e Mon Sep 17 00:00:00 2001 From: Preston Van Loon Date: Thu, 18 Nov 2021 17:17:19 -0600 Subject: [PATCH 06/45] Unskip v2 end to end check for prior release (#9920) --- testing/endtoend/minimal_e2e_test.go | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/testing/endtoend/minimal_e2e_test.go b/testing/endtoend/minimal_e2e_test.go index f0f1ebdcb5..62e00256ea 100644 --- a/testing/endtoend/minimal_e2e_test.go +++ b/testing/endtoend/minimal_e2e_test.go @@ -62,12 +62,7 @@ func e2eMinimal(t *testing.T, usePrysmSh bool) { ev.APIGatewayV1Alpha1VerifyIntegrity, ev.FinishedSyncing, ev.AllNodesHaveSameHead, - } - // TODO(#9166): remove this block once v2 changes are live. 
- if !usePrysmSh { - evals = append(evals, ev.ValidatorSyncParticipation) - } else { - t.Log("Warning: Skipping v2 specific evaluators for prior release") + ev.ValidatorSyncParticipation, } testConfig := &types.E2EConfig{ BeaconFlags: []string{ From 50159c2e4822613cb1fe581433d367ae18a988f2 Mon Sep 17 00:00:00 2001 From: Potuz Date: Thu, 18 Nov 2021 22:14:56 -0300 Subject: [PATCH 07/45] Monitor attestations (#9901) Log attestation performance on the validator monitor --- beacon-chain/monitor/BUILD.bazel | 15 + beacon-chain/monitor/process_attestation.go | 213 +++++++++++++ .../monitor/process_attestation_test.go | 287 ++++++++++++++++++ beacon-chain/monitor/service.go | 29 +- beacon-chain/state/stategen/getter.go | 9 + beacon-chain/state/stategen/getter_test.go | 38 +++ beacon-chain/state/stategen/mock.go | 5 + beacon-chain/state/stategen/service.go | 1 + 8 files changed, 596 insertions(+), 1 deletion(-) create mode 100644 beacon-chain/monitor/process_attestation.go create mode 100644 beacon-chain/monitor/process_attestation_test.go diff --git a/beacon-chain/monitor/BUILD.bazel b/beacon-chain/monitor/BUILD.bazel index 876049a57a..8bdf75e6e6 100644 --- a/beacon-chain/monitor/BUILD.bazel +++ b/beacon-chain/monitor/BUILD.bazel @@ -4,6 +4,7 @@ go_library( name = "go_default_library", srcs = [ "doc.go", + "process_attestation.go", "process_block.go", "process_exit.go", "service.go", @@ -11,10 +12,18 @@ go_library( importpath = "github.com/prysmaticlabs/prysm/beacon-chain/monitor", visibility = ["//beacon-chain:__subpackages__"], deps = [ + "//beacon-chain/core/altair:go_default_library", "//beacon-chain/core/blocks:go_default_library", + "//beacon-chain/core/helpers:go_default_library", + "//beacon-chain/state:go_default_library", + "//beacon-chain/state/stategen:go_default_library", + "//config/params:go_default_library", "//encoding/bytesutil:go_default_library", "//proto/prysm/v1alpha1:go_default_library", + "//proto/prysm/v1alpha1/attestation:go_default_library", 
"//proto/prysm/v1alpha1/block:go_default_library", + "//runtime/version:go_default_library", + "//time/slots:go_default_library", "@com_github_prysmaticlabs_eth2_types//:go_default_library", "@com_github_sirupsen_logrus//:go_default_library", ], @@ -23,18 +32,24 @@ go_library( go_test( name = "go_default_test", srcs = [ + "process_attestation_test.go", "process_block_test.go", "process_exit_test.go", "service_test.go", ], embed = [":go_default_library"], deps = [ + "//beacon-chain/db/testing:go_default_library", + "//beacon-chain/state/stategen:go_default_library", "//config/params:go_default_library", + "//encoding/bytesutil:go_default_library", "//proto/prysm/v1alpha1:go_default_library", "//proto/prysm/v1alpha1/wrapper:go_default_library", "//testing/require:go_default_library", "//testing/util:go_default_library", "@com_github_prysmaticlabs_eth2_types//:go_default_library", + "@com_github_prysmaticlabs_go_bitfield//:go_default_library", + "@com_github_sirupsen_logrus//:go_default_library", "@com_github_sirupsen_logrus//hooks/test:go_default_library", ], ) diff --git a/beacon-chain/monitor/process_attestation.go b/beacon-chain/monitor/process_attestation.go new file mode 100644 index 0000000000..c0aafc24f9 --- /dev/null +++ b/beacon-chain/monitor/process_attestation.go @@ -0,0 +1,213 @@ +package monitor + +import ( + "context" + "fmt" + + types "github.com/prysmaticlabs/eth2-types" + "github.com/prysmaticlabs/prysm/beacon-chain/core/altair" + "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" + "github.com/prysmaticlabs/prysm/beacon-chain/state" + "github.com/prysmaticlabs/prysm/config/params" + "github.com/prysmaticlabs/prysm/encoding/bytesutil" + ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" + "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/attestation" + "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block" + "github.com/prysmaticlabs/prysm/runtime/version" + "github.com/prysmaticlabs/prysm/time/slots" + 
"github.com/sirupsen/logrus" +) + +// updatedPerformanceFromTrackedVal returns true if the validator is tracked and if the +// given slot is different than the last attested slot from this validator. +func (s *Service) updatedPerformanceFromTrackedVal(idx types.ValidatorIndex, slot types.Slot) bool { + if !s.TrackedIndex(types.ValidatorIndex(idx)) { + return false + } + + if lp, ok := s.latestPerformance[types.ValidatorIndex(idx)]; ok { + return lp.attestedSlot != slot + } + return false +} + +// attestingIndices returns the indices of validators that appear in the +// given aggregated atestation. +func attestingIndices(ctx context.Context, state state.BeaconState, att *ethpb.Attestation) ([]uint64, error) { + committee, err := helpers.BeaconCommitteeFromState(ctx, state, att.Data.Slot, att.Data.CommitteeIndex) + if err != nil { + return nil, err + } + return attestation.AttestingIndices(att.AggregationBits, committee) +} + +// logMessageTimelyFlagsForIndex returns the log message with the basic +// performance indicators for the attestation (head, source, target) +func logMessageTimelyFlagsForIndex(idx types.ValidatorIndex, data *ethpb.AttestationData) logrus.Fields { + return logrus.Fields{ + "ValidatorIndex": idx, + "Slot": data.Slot, + "Source": fmt.Sprintf("%#x", bytesutil.Trunc(data.Source.Root)), + "Target": fmt.Sprintf("%#x", bytesutil.Trunc(data.Target.Root)), + "Head": fmt.Sprintf("%#x", bytesutil.Trunc(data.BeaconBlockRoot)), + } +} + +// processAttestations logs the event that one of our tracked validators' +// attestations was included in a block +func (s *Service) processAttestations(ctx context.Context, state state.BeaconState, blk block.BeaconBlock) { + if blk == nil || blk.Body() == nil { + return + } + for _, attestation := range blk.Body().Attestations() { + s.processIncludedAttestation(ctx, state, attestation) + } +} + +// processIncludedAttestation logs in the event that one of our tracked validators' +// appears in the attesting indices and 
this included attestation was not included +// before. +func (s *Service) processIncludedAttestation(ctx context.Context, state state.BeaconState, att *ethpb.Attestation) { + attestingIndices, err := attestingIndices(ctx, state, att) + if err != nil { + log.WithError(err).Error("Could not get attesting indices") + return + } + for _, idx := range attestingIndices { + if s.updatedPerformanceFromTrackedVal(types.ValidatorIndex(idx), att.Data.Slot) { + logFields := logMessageTimelyFlagsForIndex(types.ValidatorIndex(idx), att.Data) + balance, err := state.BalanceAtIndex(types.ValidatorIndex(idx)) + if err != nil { + log.WithError(err).Error("Could not get balance") + return + } + + aggregatedPerf := s.aggregatedPerformance[types.ValidatorIndex(idx)] + aggregatedPerf.totalAttestedCount++ + aggregatedPerf.totalRequestedCount++ + + latestPerf := s.latestPerformance[types.ValidatorIndex(idx)] + balanceChg := balance - latestPerf.balance + latestPerf.balanceChange = balanceChg + latestPerf.balance = balance + latestPerf.attestedSlot = att.Data.Slot + latestPerf.inclusionSlot = state.Slot() + aggregatedPerf.totalDistance += uint64(latestPerf.inclusionSlot - latestPerf.attestedSlot) + + if state.Version() == version.Altair { + targetIdx := params.BeaconConfig().TimelyTargetFlagIndex + sourceIdx := params.BeaconConfig().TimelySourceFlagIndex + headIdx := params.BeaconConfig().TimelyHeadFlagIndex + + var participation []byte + if slots.ToEpoch(latestPerf.inclusionSlot) == + slots.ToEpoch(latestPerf.attestedSlot) { + participation, err = state.CurrentEpochParticipation() + if err != nil { + log.WithError(err).Error("Could not get current epoch participation") + return + } + } else { + participation, err = state.PreviousEpochParticipation() + if err != nil { + log.WithError(err).Error("Could not get previous epoch participation") + return + } + } + flags := participation[idx] + hasFlag, err := altair.HasValidatorFlag(flags, sourceIdx) + if err != nil { + 
log.WithError(err).Error("Could not get timely Source flag") + return + } + latestPerf.timelySource = hasFlag + hasFlag, err = altair.HasValidatorFlag(flags, headIdx) + if err != nil { + log.WithError(err).Error("Could not get timely Head flag") + return + } + latestPerf.timelyHead = hasFlag + hasFlag, err = altair.HasValidatorFlag(flags, targetIdx) + if err != nil { + log.WithError(err).Error("Could not get timely Target flag") + return + } + latestPerf.timelyTarget = hasFlag + + if latestPerf.timelySource { + aggregatedPerf.totalCorrectSource++ + } + if latestPerf.timelyHead { + aggregatedPerf.totalCorrectHead++ + } + if latestPerf.timelyTarget { + aggregatedPerf.totalCorrectTarget++ + } + } + logFields["CorrectHead"] = latestPerf.timelyHead + logFields["CorrectSource"] = latestPerf.timelySource + logFields["CorrectTarget"] = latestPerf.timelyTarget + logFields["InclusionSlot"] = latestPerf.inclusionSlot + logFields["NewBalance"] = balance + logFields["BalanceChange"] = balanceChg + + s.latestPerformance[types.ValidatorIndex(idx)] = latestPerf + s.aggregatedPerformance[types.ValidatorIndex(idx)] = aggregatedPerf + log.WithFields(logFields).Info("Attestation included") + } + } +} + +// processUnaggregatedAttestation logs when the beacon node sees an unaggregated attestation from one of our +// tracked validators +func (s *Service) processUnaggregatedAttestation(ctx context.Context, att *ethpb.Attestation) { + root := bytesutil.ToBytes32(att.Data.BeaconBlockRoot) + state := s.config.StateGen.StateByRootIfCachedNoCopy(root) + if state == nil { + log.Debug("Skipping unaggregated attestation due to state not found in cache") + return + } + attestingIndices, err := attestingIndices(ctx, state, att) + if err != nil { + log.WithError(err).Error("Could not get attesting indices") + return + } + for _, idx := range attestingIndices { + if s.updatedPerformanceFromTrackedVal(types.ValidatorIndex(idx), att.Data.Slot) { + logFields := 
logMessageTimelyFlagsForIndex(types.ValidatorIndex(idx), att.Data) + log.WithFields(logFields).Info("Processed unaggregated attestation") + } + } +} + +// processAggregatedAttestation logs when we see an aggregation from one of our tracked validators or an aggregated +// attestation from one of our tracked validators +func (s *Service) processAggregatedAttestation(ctx context.Context, att *ethpb.AggregateAttestationAndProof) { + if s.TrackedIndex(att.AggregatorIndex) { + log.WithFields(logrus.Fields{ + "ValidatorIndex": att.AggregatorIndex, + }).Info("Processed attestation aggregation") + aggregatedPerf := s.aggregatedPerformance[att.AggregatorIndex] + aggregatedPerf.totalAggregations++ + s.aggregatedPerformance[att.AggregatorIndex] = aggregatedPerf + } + + var root [32]byte + copy(root[:], att.Aggregate.Data.BeaconBlockRoot) + state := s.config.StateGen.StateByRootIfCachedNoCopy(root) + if state == nil { + log.Debug("Skipping agregated attestation due to state not found in cache") + return + } + attestingIndices, err := attestingIndices(ctx, state, att.Aggregate) + if err != nil { + log.WithError(err).Error("Could not get attesting indices") + return + } + for _, idx := range attestingIndices { + if s.updatedPerformanceFromTrackedVal(types.ValidatorIndex(idx), att.Aggregate.Data.Slot) { + logFields := logMessageTimelyFlagsForIndex(types.ValidatorIndex(idx), att.Aggregate.Data) + log.WithFields(logFields).Info("Processed aggregated attestation") + } + } +} diff --git a/beacon-chain/monitor/process_attestation_test.go b/beacon-chain/monitor/process_attestation_test.go new file mode 100644 index 0000000000..f87d8929e5 --- /dev/null +++ b/beacon-chain/monitor/process_attestation_test.go @@ -0,0 +1,287 @@ +package monitor + +import ( + "bytes" + "context" + "testing" + + types "github.com/prysmaticlabs/eth2-types" + "github.com/prysmaticlabs/go-bitfield" + testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing" + 
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen" + "github.com/prysmaticlabs/prysm/encoding/bytesutil" + ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" + "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper" + "github.com/prysmaticlabs/prysm/testing/require" + "github.com/prysmaticlabs/prysm/testing/util" + "github.com/sirupsen/logrus" + logTest "github.com/sirupsen/logrus/hooks/test" +) + +func setupService(t *testing.T) *Service { + beaconDB := testDB.SetupDB(t) + + trackedVals := map[types.ValidatorIndex]interface{}{ + 1: nil, + 2: nil, + 12: nil, + } + latestPerformance := map[types.ValidatorIndex]ValidatorLatestPerformance{ + 1: { + balance: 32000000000, + }, + 2: { + balance: 32000000000, + }, + 12: { + balance: 31900000000, + }, + } + + aggregatedPerformance := map[types.ValidatorIndex]ValidatorAggregatedPerformance{ + 1: {}, + 2: {}, + 12: {}, + } + + return &Service{ + config: &ValidatorMonitorConfig{ + StateGen: stategen.New(beaconDB), + TrackedValidators: trackedVals, + }, + latestPerformance: latestPerformance, + aggregatedPerformance: aggregatedPerformance, + } +} + +func TestGetAttestingIndices(t *testing.T) { + ctx := context.Background() + beaconState, _ := util.DeterministicGenesisState(t, 256) + att := ðpb.Attestation{ + Data: ðpb.AttestationData{ + Slot: 1, + CommitteeIndex: 0, + }, + AggregationBits: bitfield.Bitlist{0b11, 0b1}, + } + attestingIndices, err := attestingIndices(ctx, beaconState, att) + require.NoError(t, err) + require.DeepEqual(t, attestingIndices, []uint64{0xc, 0x2}) + +} + +func TestProcessIncludedAttestationTwoTracked(t *testing.T) { + hook := logTest.NewGlobal() + s := setupService(t) + state, _ := util.DeterministicGenesisStateAltair(t, 256) + require.NoError(t, state.SetSlot(2)) + require.NoError(t, state.SetCurrentParticipationBits(bytes.Repeat([]byte{0xff}, 13))) + + att := ðpb.Attestation{ + Data: ðpb.AttestationData{ + Slot: 1, + CommitteeIndex: 0, + BeaconBlockRoot: 
bytesutil.PadTo([]byte("hello-world"), 32), + Source: ðpb.Checkpoint{ + Epoch: 0, + Root: bytesutil.PadTo([]byte("hello-world"), 32), + }, + Target: ðpb.Checkpoint{ + Epoch: 1, + Root: bytesutil.PadTo([]byte("hello-world"), 32), + }, + }, + AggregationBits: bitfield.Bitlist{0b11, 0b1}, + } + s.processIncludedAttestation(context.Background(), state, att) + wanted1 := "\"Attestation included\" BalanceChange=0 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor" + wanted2 := "\"Attestation included\" BalanceChange=100000000 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor" + require.LogsContain(t, hook, wanted1) + require.LogsContain(t, hook, wanted2) +} + +func TestProcessUnaggregatedAttestationStateNotCached(t *testing.T) { + logrus.SetLevel(logrus.DebugLevel) + hook := logTest.NewGlobal() + ctx := context.Background() + + s := setupService(t) + state, _ := util.DeterministicGenesisStateAltair(t, 256) + require.NoError(t, state.SetSlot(2)) + header := state.LatestBlockHeader() + participation := []byte{0xff, 0xff, 0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + require.NoError(t, state.SetCurrentParticipationBits(participation)) + + att := ðpb.Attestation{ + Data: ðpb.AttestationData{ + Slot: 1, + CommitteeIndex: 0, + BeaconBlockRoot: header.GetStateRoot(), + Source: ðpb.Checkpoint{ + Epoch: 0, + Root: bytesutil.PadTo([]byte("hello-world"), 32), + }, + Target: ðpb.Checkpoint{ + Epoch: 1, + Root: bytesutil.PadTo([]byte("hello-world"), 32), + }, + }, + AggregationBits: bitfield.Bitlist{0b11, 0b1}, + } + s.processUnaggregatedAttestation(ctx, att) + require.LogsContain(t, hook, "Skipping unaggregated attestation due to state not found in cache") + logrus.SetLevel(logrus.InfoLevel) +} + +func 
TestProcessUnaggregatedAttestationStateCached(t *testing.T) { + ctx := context.Background() + hook := logTest.NewGlobal() + + s := setupService(t) + state, _ := util.DeterministicGenesisStateAltair(t, 256) + participation := []byte{0xff, 0xff, 0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + require.NoError(t, state.SetCurrentParticipationBits(participation)) + + root := [32]byte{} + copy(root[:], "hello-world") + + att := ðpb.Attestation{ + Data: ðpb.AttestationData{ + Slot: 1, + CommitteeIndex: 0, + BeaconBlockRoot: root[:], + Source: ðpb.Checkpoint{ + Epoch: 0, + Root: root[:], + }, + Target: ðpb.Checkpoint{ + Epoch: 1, + Root: root[:], + }, + }, + AggregationBits: bitfield.Bitlist{0b11, 0b1}, + } + require.NoError(t, s.config.StateGen.SaveState(ctx, root, state)) + s.processUnaggregatedAttestation(context.Background(), att) + wanted1 := "\"Processed unaggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor" + wanted2 := "\"Processed unaggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor" + require.LogsContain(t, hook, wanted1) + require.LogsContain(t, hook, wanted2) +} + +func TestProcessAggregatedAttestationStateNotCached(t *testing.T) { + logrus.SetLevel(logrus.DebugLevel) + hook := logTest.NewGlobal() + ctx := context.Background() + + s := setupService(t) + state, _ := util.DeterministicGenesisStateAltair(t, 256) + require.NoError(t, state.SetSlot(2)) + header := state.LatestBlockHeader() + participation := []byte{0xff, 0xff, 0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + require.NoError(t, state.SetCurrentParticipationBits(participation)) + + att := ðpb.AggregateAttestationAndProof{ + AggregatorIndex: 2, + Aggregate: ðpb.Attestation{ + Data: ðpb.AttestationData{ + Slot: 1, + CommitteeIndex: 0, + BeaconBlockRoot: header.GetStateRoot(), + Source: ðpb.Checkpoint{ + Epoch: 0, + Root: bytesutil.PadTo([]byte("hello-world"), 32), + }, + 
Target: ðpb.Checkpoint{ + Epoch: 1, + Root: bytesutil.PadTo([]byte("hello-world"), 32), + }, + }, + AggregationBits: bitfield.Bitlist{0b11, 0b1}, + }, + } + s.processAggregatedAttestation(ctx, att) + require.LogsContain(t, hook, "\"Processed attestation aggregation\" ValidatorIndex=2 prefix=monitor") + require.LogsContain(t, hook, "Skipping agregated attestation due to state not found in cache") + logrus.SetLevel(logrus.InfoLevel) +} + +func TestProcessAggregatedAttestationStateCached(t *testing.T) { + hook := logTest.NewGlobal() + ctx := context.Background() + s := setupService(t) + state, _ := util.DeterministicGenesisStateAltair(t, 256) + participation := []byte{0xff, 0xff, 0x01, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0} + require.NoError(t, state.SetCurrentParticipationBits(participation)) + + root := [32]byte{} + copy(root[:], "hello-world") + + att := ðpb.AggregateAttestationAndProof{ + AggregatorIndex: 2, + Aggregate: ðpb.Attestation{ + Data: ðpb.AttestationData{ + Slot: 1, + CommitteeIndex: 0, + BeaconBlockRoot: root[:], + Source: ðpb.Checkpoint{ + Epoch: 0, + Root: root[:], + }, + Target: ðpb.Checkpoint{ + Epoch: 1, + Root: root[:], + }, + }, + AggregationBits: bitfield.Bitlist{0b10, 0b1}, + }, + } + + require.NoError(t, s.config.StateGen.SaveState(ctx, root, state)) + s.processAggregatedAttestation(ctx, att) + require.LogsContain(t, hook, "\"Processed attestation aggregation\" ValidatorIndex=2 prefix=monitor") + require.LogsContain(t, hook, "\"Processed aggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor") + require.LogsDoNotContain(t, hook, "\"Processed aggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor") +} + +func TestProcessAttestations(t *testing.T) { + hook := logTest.NewGlobal() + s := setupService(t) + ctx := context.Background() + state, _ := util.DeterministicGenesisStateAltair(t, 256) + require.NoError(t, 
state.SetSlot(2)) + require.NoError(t, state.SetCurrentParticipationBits(bytes.Repeat([]byte{0xff}, 13))) + + att := ðpb.Attestation{ + Data: ðpb.AttestationData{ + Slot: 1, + CommitteeIndex: 0, + BeaconBlockRoot: bytesutil.PadTo([]byte("hello-world"), 32), + Source: ðpb.Checkpoint{ + Epoch: 0, + Root: bytesutil.PadTo([]byte("hello-world"), 32), + }, + Target: ðpb.Checkpoint{ + Epoch: 1, + Root: bytesutil.PadTo([]byte("hello-world"), 32), + }, + }, + AggregationBits: bitfield.Bitlist{0b11, 0b1}, + } + + block := ðpb.BeaconBlockAltair{ + Slot: 2, + Body: ðpb.BeaconBlockBodyAltair{ + Attestations: []*ethpb.Attestation{att}, + }, + } + + wrappedBlock, err := wrapper.WrappedAltairBeaconBlock(block) + require.NoError(t, err) + s.processAttestations(ctx, state, wrappedBlock) + wanted1 := "\"Attestation included\" BalanceChange=0 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor" + wanted2 := "\"Attestation included\" BalanceChange=100000000 CorrectHead=true CorrectSource=true CorrectTarget=true Head=0x68656c6c6f2d InclusionSlot=2 NewBalance=32000000000 Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor" + require.LogsContain(t, hook, wanted1) + require.LogsContain(t, hook, wanted2) + +} diff --git a/beacon-chain/monitor/service.go b/beacon-chain/monitor/service.go index 33c41d0f67..10a78e7123 100644 --- a/beacon-chain/monitor/service.go +++ b/beacon-chain/monitor/service.go @@ -2,6 +2,7 @@ package monitor import ( types "github.com/prysmaticlabs/eth2-types" + "github.com/prysmaticlabs/prysm/beacon-chain/state/stategen" "github.com/sirupsen/logrus" ) @@ -9,17 +10,43 @@ var ( log = logrus.WithField("prefix", "monitor") ) +// ValidatorLatestPerformance keeps track of the latest participation of the validator +type ValidatorLatestPerformance struct { + attestedSlot types.Slot + inclusionSlot 
types.Slot + timelySource bool + timelyTarget bool + timelyHead bool + balance uint64 + balanceChange uint64 +} + +// ValidatorAggregatedPerformance keeps track of the accumulated performance of +// the validator since launch +type ValidatorAggregatedPerformance struct { + totalAttestedCount uint64 + totalRequestedCount uint64 + totalDistance uint64 + totalCorrectSource uint64 + totalCorrectTarget uint64 + totalCorrectHead uint64 + totalAggregations uint64 +} + // ValidatorMonitorConfig contains the list of validator indices that the // monitor service tracks, as well as the event feed notifier that the // monitor needs to subscribe. type ValidatorMonitorConfig struct { + StateGen stategen.StateManager TrackedValidators map[types.ValidatorIndex]interface{} } // Service is the main structure that tracks validators and reports logs and // metrics of their performances throughout their lifetime. type Service struct { - config *ValidatorMonitorConfig + config *ValidatorMonitorConfig + latestPerformance map[types.ValidatorIndex]ValidatorLatestPerformance + aggregatedPerformance map[types.ValidatorIndex]ValidatorAggregatedPerformance } // TrackedIndex returns if the given validator index corresponds to one of the diff --git a/beacon-chain/state/stategen/getter.go b/beacon-chain/state/stategen/getter.go index b6a3dac8c0..6751f2eb4e 100644 --- a/beacon-chain/state/stategen/getter.go +++ b/beacon-chain/state/stategen/getter.go @@ -37,6 +37,15 @@ func (s *State) HasStateInCache(ctx context.Context, blockRoot [32]byte) (bool, return has, nil } +// StateByRootIfCached retrieves a state using the input block root only if the state is already in the cache +func (s *State) StateByRootIfCachedNoCopy(blockRoot [32]byte) state.BeaconState { + if !s.hotStateCache.has(blockRoot) { + return nil + } + state := s.hotStateCache.getWithoutCopy(blockRoot) + return state +} + // StateByRoot retrieves the state using input block root. 
func (s *State) StateByRoot(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error) { ctx, span := trace.StartSpan(ctx, "stateGen.StateByRoot") diff --git a/beacon-chain/state/stategen/getter_test.go b/beacon-chain/state/stategen/getter_test.go index c033d3f9da..6e49dcf391 100644 --- a/beacon-chain/state/stategen/getter_test.go +++ b/beacon-chain/state/stategen/getter_test.go @@ -56,6 +56,44 @@ func TestStateByRoot_ColdState(t *testing.T) { require.DeepSSZEqual(t, loadedState.InnerStateUnsafe(), beaconState.InnerStateUnsafe()) } +func TestStateByRootIfCachedNoCopy_HotState(t *testing.T) { + ctx := context.Background() + beaconDB := testDB.SetupDB(t) + + service := New(beaconDB) + + beaconState, _ := util.DeterministicGenesisState(t, 32) + r := [32]byte{'A'} + require.NoError(t, service.beaconDB.SaveStateSummary(ctx, ðpb.StateSummary{Root: r[:]})) + service.hotStateCache.put(r, beaconState) + + loadedState := service.StateByRootIfCachedNoCopy(r) + require.DeepSSZEqual(t, loadedState.InnerStateUnsafe(), beaconState.InnerStateUnsafe()) +} + +func TestStateByRootIfCachedNoCopy_ColdState(t *testing.T) { + ctx := context.Background() + beaconDB := testDB.SetupDB(t) + + service := New(beaconDB) + service.finalizedInfo.slot = 2 + service.slotsPerArchivedPoint = 1 + + b := util.NewBeaconBlock() + b.Block.Slot = 1 + require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b))) + bRoot, err := b.Block.HashTreeRoot() + require.NoError(t, err) + beaconState, _ := util.DeterministicGenesisState(t, 32) + require.NoError(t, beaconState.SetSlot(1)) + require.NoError(t, service.beaconDB.SaveState(ctx, beaconState, bRoot)) + require.NoError(t, service.beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(b))) + require.NoError(t, service.beaconDB.SaveGenesisBlockRoot(ctx, bRoot)) + loadedState := service.StateByRootIfCachedNoCopy(bRoot) + require.NoError(t, err) + require.Equal(t, loadedState, nil) +} + func 
TestStateByRoot_HotStateUsingEpochBoundaryCacheNoReplay(t *testing.T) { ctx := context.Background() beaconDB := testDB.SetupDB(t) diff --git a/beacon-chain/state/stategen/mock.go b/beacon-chain/state/stategen/mock.go index 768dc43c53..22d6e843b5 100644 --- a/beacon-chain/state/stategen/mock.go +++ b/beacon-chain/state/stategen/mock.go @@ -23,6 +23,11 @@ func NewMockService() *MockStateManager { } } +// StateByRootIfCached +func (m *MockStateManager) StateByRootIfCachedNoCopy(_ [32]byte) state.BeaconState { // skipcq: RVV-B0013 + panic("implement me") +} + // Resume -- func (m *MockStateManager) Resume(_ context.Context, _ state.BeaconState) (state.BeaconState, error) { panic("implement me") diff --git a/beacon-chain/state/stategen/service.go b/beacon-chain/state/stategen/service.go index dee100f9d2..69dce996ee 100644 --- a/beacon-chain/state/stategen/service.go +++ b/beacon-chain/state/stategen/service.go @@ -31,6 +31,7 @@ type StateManager interface { HasState(ctx context.Context, blockRoot [32]byte) (bool, error) HasStateInCache(ctx context.Context, blockRoot [32]byte) (bool, error) StateByRoot(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error) + StateByRootIfCachedNoCopy(blockRoot [32]byte) state.BeaconState StateByRootInitialSync(ctx context.Context, blockRoot [32]byte) (state.BeaconState, error) StateBySlot(ctx context.Context, slot types.Slot) (state.BeaconState, error) RecoverStateSummary(ctx context.Context, blockRoot [32]byte) (*ethpb.StateSummary, error) From ee52f8dff32cf9b300feab7ab79060ca92c1fc18 Mon Sep 17 00:00:00 2001 From: Raul Jordan Date: Thu, 18 Nov 2021 23:11:54 -0500 Subject: [PATCH 08/45] Implement Validator Standard Key Manager API Delete Keystores (#9886) * begin * implement delete and filter export history * rem deleted code * delete keystores all tests * gaz * test * double import fix * test * surface errors to user * add in changes * edit proto * edit * del * tests * gaz * slice * duplicate key found in request --- 
proto/eth/service/key_management.pb.go | 24 ++-- proto/eth/service/key_management.proto | 3 +- validator/accounts/BUILD.bazel | 1 + validator/accounts/accounts_delete.go | 54 +++----- validator/keymanager/BUILD.bazel | 1 + validator/keymanager/derived/BUILD.bazel | 1 + validator/keymanager/derived/keymanager.go | 9 +- validator/keymanager/imported/BUILD.bazel | 4 + validator/keymanager/imported/delete.go | 93 +++++++++++++ validator/keymanager/imported/delete_test.go | 126 ++++++++++++++++++ validator/keymanager/imported/keymanager.go | 48 ------- .../keymanager/imported/keymanager_test.go | 52 -------- validator/keymanager/types.go | 35 ++++- validator/keymanager/types_test.go | 7 + validator/rpc/BUILD.bazel | 1 + validator/rpc/standard_api.go | 42 +++++- validator/rpc/standard_api_test.go | 104 ++++++++++++++- .../slashing-protection-history/export.go | 16 ++- .../round_trip_test.go | 37 +++++ 19 files changed, 505 insertions(+), 153 deletions(-) create mode 100644 validator/keymanager/imported/delete.go create mode 100644 validator/keymanager/imported/delete_test.go diff --git a/proto/eth/service/key_management.pb.go b/proto/eth/service/key_management.pb.go index 90d4355ca4..b8a305b8eb 100755 --- a/proto/eth/service/key_management.pb.go +++ b/proto/eth/service/key_management.pb.go @@ -87,9 +87,10 @@ func (ImportedKeystoreStatus_Status) EnumDescriptor() ([]byte, []int) { type DeletedKeystoreStatus_Status int32 const ( - DeletedKeystoreStatus_DELETED DeletedKeystoreStatus_Status = 0 - DeletedKeystoreStatus_NOT_FOUND DeletedKeystoreStatus_Status = 1 - DeletedKeystoreStatus_ERROR DeletedKeystoreStatus_Status = 2 + DeletedKeystoreStatus_DELETED DeletedKeystoreStatus_Status = 0 + DeletedKeystoreStatus_NOT_FOUND DeletedKeystoreStatus_Status = 1 + DeletedKeystoreStatus_NOT_ACTIVE DeletedKeystoreStatus_Status = 2 + DeletedKeystoreStatus_ERROR DeletedKeystoreStatus_Status = 3 ) // Enum value maps for DeletedKeystoreStatus_Status. 
@@ -97,12 +98,14 @@ var ( DeletedKeystoreStatus_Status_name = map[int32]string{ 0: "DELETED", 1: "NOT_FOUND", - 2: "ERROR", + 2: "NOT_ACTIVE", + 3: "ERROR", } DeletedKeystoreStatus_Status_value = map[string]int32{ - "DELETED": 0, - "NOT_FOUND": 1, - "ERROR": 2, + "DELETED": 0, + "NOT_FOUND": 1, + "NOT_ACTIVE": 2, + "ERROR": 3, } ) @@ -625,7 +628,7 @@ var file_proto_eth_service_key_management_proto_rawDesc = []byte{ 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x30, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x44, 0x55, 0x50, 0x4c, 0x49, 0x43, 0x41, 0x54, 0x45, 0x10, 0x01, 0x12, 0x09, - 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x22, 0xae, 0x01, 0x0a, 0x15, 0x44, 0x65, + 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x22, 0xbe, 0x01, 0x0a, 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x4a, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x32, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, @@ -633,10 +636,11 @@ var file_proto_eth_service_key_management_proto_rawDesc = []byte{ 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, - 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x2f, 0x0a, 0x06, 0x53, 0x74, 0x61, + 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x3f, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x4f, 0x54, 0x5f, 0x46, 0x4f, 0x55, 0x4e, 0x44, 0x10, 0x01, 0x12, - 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 
0x52, 0x10, 0x02, 0x32, 0xb9, 0x03, 0x0a, 0x0d, 0x4b, + 0x0e, 0x0a, 0x0a, 0x4e, 0x4f, 0x54, 0x5f, 0x41, 0x43, 0x54, 0x49, 0x56, 0x45, 0x10, 0x02, 0x12, + 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x03, 0x32, 0xb9, 0x03, 0x0a, 0x0d, 0x4b, 0x65, 0x79, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x78, 0x0a, 0x0d, 0x4c, 0x69, 0x73, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, diff --git a/proto/eth/service/key_management.proto b/proto/eth/service/key_management.proto index 52e8790dfe..94da434e70 100644 --- a/proto/eth/service/key_management.proto +++ b/proto/eth/service/key_management.proto @@ -130,7 +130,8 @@ message DeletedKeystoreStatus { enum Status { DELETED = 0; NOT_FOUND = 1; - ERROR = 2; + NOT_ACTIVE = 2; + ERROR = 3; } Status status = 1; string message = 2; diff --git a/validator/accounts/BUILD.bazel b/validator/accounts/BUILD.bazel index 3b0f40c094..4da748bce8 100644 --- a/validator/accounts/BUILD.bazel +++ b/validator/accounts/BUILD.bazel @@ -32,6 +32,7 @@ go_library( "//encoding/bytesutil:go_default_library", "//io/file:go_default_library", "//io/prompt:go_default_library", + "//proto/eth/service:go_default_library", "//proto/prysm/v1alpha1:go_default_library", "//validator/accounts/iface:go_default_library", "//validator/accounts/petnames:go_default_library", diff --git a/validator/accounts/accounts_delete.go b/validator/accounts/accounts_delete.go index 1270458892..936f2b6fb1 100644 --- a/validator/accounts/accounts_delete.go +++ b/validator/accounts/accounts_delete.go @@ -10,12 +10,11 @@ import ( "github.com/prysmaticlabs/prysm/cmd/validator/flags" "github.com/prysmaticlabs/prysm/encoding/bytesutil" "github.com/prysmaticlabs/prysm/io/prompt" + ethpbservice "github.com/prysmaticlabs/prysm/proto/eth/service" "github.com/prysmaticlabs/prysm/validator/accounts/iface" 
"github.com/prysmaticlabs/prysm/validator/accounts/userprompt" "github.com/prysmaticlabs/prysm/validator/accounts/wallet" "github.com/prysmaticlabs/prysm/validator/keymanager" - "github.com/prysmaticlabs/prysm/validator/keymanager/derived" - "github.com/prysmaticlabs/prysm/validator/keymanager/imported" "github.com/urfave/cli/v2" ) @@ -102,37 +101,28 @@ func DeleteAccountCli(cliCtx *cli.Context) error { // DeleteAccount deletes the accounts that the user requests to be deleted from the wallet. func DeleteAccount(ctx context.Context, cfg *Config) error { - switch cfg.Wallet.KeymanagerKind() { - case keymanager.Remote: - return errors.New("cannot delete accounts for a remote keymanager") - case keymanager.Imported: - km, ok := cfg.Keymanager.(*imported.Keymanager) - if !ok { - return errors.New("not a imported keymanager") + deleter, ok := cfg.Keymanager.(keymanager.Deleter) + if !ok { + return errors.New("keymanager does not implement Deleter interface") + } + if len(cfg.DeletePublicKeys) == 1 { + log.Info("Deleting account...") + } else { + log.Info("Deleting accounts...") + } + statuses, err := deleter.DeleteKeystores(ctx, cfg.DeletePublicKeys) + if err != nil { + return errors.Wrap(err, "could not delete accounts") + } + for i, status := range statuses { + switch status.Status { + case ethpbservice.DeletedKeystoreStatus_ERROR: + log.Errorf("Error deleting key %#x: %s", bytesutil.Trunc(cfg.DeletePublicKeys[i]), status.Message) + case ethpbservice.DeletedKeystoreStatus_NOT_ACTIVE: + log.Warnf("Duplicate key %#x found in delete request", bytesutil.Trunc(cfg.DeletePublicKeys[i])) + case ethpbservice.DeletedKeystoreStatus_NOT_FOUND: + log.Warnf("Could not find keystore for %#x", bytesutil.Trunc(cfg.DeletePublicKeys[i])) } - if len(cfg.DeletePublicKeys) == 1 { - log.Info("Deleting account...") - } else { - log.Info("Deleting accounts...") - } - if err := km.DeleteAccounts(ctx, cfg.DeletePublicKeys); err != nil { - return errors.Wrap(err, "could not delete accounts") - 
} - case keymanager.Derived: - km, ok := cfg.Keymanager.(*derived.Keymanager) - if !ok { - return errors.New("not a derived keymanager") - } - if len(cfg.DeletePublicKeys) == 1 { - log.Info("Deleting account...") - } else { - log.Info("Deleting accounts...") - } - if err := km.DeleteAccounts(ctx, cfg.DeletePublicKeys); err != nil { - return errors.Wrap(err, "could not delete accounts") - } - default: - return fmt.Errorf(errKeymanagerNotSupported, cfg.Wallet.KeymanagerKind()) } return nil } diff --git a/validator/keymanager/BUILD.bazel b/validator/keymanager/BUILD.bazel index 3e67d31934..945cfbde07 100644 --- a/validator/keymanager/BUILD.bazel +++ b/validator/keymanager/BUILD.bazel @@ -15,6 +15,7 @@ go_library( deps = [ "//async/event:go_default_library", "//crypto/bls:go_default_library", + "//proto/eth/service:go_default_library", "//proto/prysm/v1alpha1/validator-client:go_default_library", ], ) diff --git a/validator/keymanager/derived/BUILD.bazel b/validator/keymanager/derived/BUILD.bazel index ba6aee0708..47101ee8ed 100644 --- a/validator/keymanager/derived/BUILD.bazel +++ b/validator/keymanager/derived/BUILD.bazel @@ -17,6 +17,7 @@ go_library( "//crypto/bls:go_default_library", "//crypto/rand:go_default_library", "//io/prompt:go_default_library", + "//proto/eth/service:go_default_library", "//proto/prysm/v1alpha1/validator-client:go_default_library", "//validator/accounts/iface:go_default_library", "//validator/keymanager:go_default_library", diff --git a/validator/keymanager/derived/keymanager.go b/validator/keymanager/derived/keymanager.go index a951c240e9..f2d44959fa 100644 --- a/validator/keymanager/derived/keymanager.go +++ b/validator/keymanager/derived/keymanager.go @@ -7,6 +7,7 @@ import ( "github.com/pkg/errors" "github.com/prysmaticlabs/prysm/async/event" "github.com/prysmaticlabs/prysm/crypto/bls" + ethpbservice "github.com/prysmaticlabs/prysm/proto/eth/service" validatorpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/validator-client" 
"github.com/prysmaticlabs/prysm/validator/accounts/iface" "github.com/prysmaticlabs/prysm/validator/keymanager" @@ -106,9 +107,11 @@ func (km *Keymanager) FetchValidatingPrivateKeys(ctx context.Context) ([][32]byt return km.importedKM.FetchValidatingPrivateKeys(ctx) } -// DeleteAccounts for a derived keymanager. -func (km *Keymanager) DeleteAccounts(ctx context.Context, publicKeys [][]byte) error { - return km.importedKM.DeleteAccounts(ctx, publicKeys) +// DeleteKeystores for a derived keymanager. +func (km *Keymanager) DeleteKeystores( + ctx context.Context, publicKeys [][]byte, +) ([]*ethpbservice.DeletedKeystoreStatus, error) { + return km.importedKM.DeleteKeystores(ctx, publicKeys) } // SubscribeAccountChanges creates an event subscription for a channel diff --git a/validator/keymanager/imported/BUILD.bazel b/validator/keymanager/imported/BUILD.bazel index c184ef7892..abd80be186 100644 --- a/validator/keymanager/imported/BUILD.bazel +++ b/validator/keymanager/imported/BUILD.bazel @@ -4,6 +4,7 @@ go_library( name = "go_default_library", srcs = [ "backup.go", + "delete.go", "doc.go", "import.go", "keymanager.go", @@ -24,6 +25,7 @@ go_library( "//encoding/bytesutil:go_default_library", "//io/file:go_default_library", "//io/prompt:go_default_library", + "//proto/eth/service:go_default_library", "//proto/prysm/v1alpha1/validator-client:go_default_library", "//runtime/interop:go_default_library", "//validator/accounts/iface:go_default_library", @@ -44,6 +46,7 @@ go_test( name = "go_default_test", srcs = [ "backup_test.go", + "delete_test.go", "import_test.go", "keymanager_test.go", "refresh_test.go", @@ -53,6 +56,7 @@ go_test( "//async/event:go_default_library", "//crypto/bls:go_default_library", "//encoding/bytesutil:go_default_library", + "//proto/eth/service:go_default_library", "//proto/prysm/v1alpha1/validator-client:go_default_library", "//testing/assert:go_default_library", "//testing/require:go_default_library", diff --git 
a/validator/keymanager/imported/delete.go b/validator/keymanager/imported/delete.go new file mode 100644 index 0000000000..83f0a9cefc --- /dev/null +++ b/validator/keymanager/imported/delete.go @@ -0,0 +1,93 @@ +package imported + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + + "github.com/pkg/errors" + "github.com/prysmaticlabs/prysm/encoding/bytesutil" + ethpbservice "github.com/prysmaticlabs/prysm/proto/eth/service" + "github.com/sirupsen/logrus" +) + +// DeleteKeystores takes in public keys and removes the accounts from the wallet. +// This includes their disk keystore and cached keystore, but maintains the slashing +// protection history in the database. +func (km *Keymanager) DeleteKeystores( + ctx context.Context, publicKeys [][]byte, +) ([]*ethpbservice.DeletedKeystoreStatus, error) { + // Check for duplicate keys and filter them out. + trackedPublicKeys := make(map[[48]byte]bool) + statuses := make([]*ethpbservice.DeletedKeystoreStatus, 0, len(publicKeys)) + var store *AccountsKeystoreRepresentation + var err error + deletedKeys := make([][]byte, 0, len(publicKeys)) + for _, publicKey := range publicKeys { + // Check if the key in the request is a duplicate. + if _, ok := trackedPublicKeys[bytesutil.ToBytes48(publicKey)]; ok { + statuses = append(statuses, ðpbservice.DeletedKeystoreStatus{ + Status: ethpbservice.DeletedKeystoreStatus_NOT_ACTIVE, + }) + continue + } + var index int + var found bool + for j, pubKey := range km.accountsStore.PublicKeys { + if bytes.Equal(pubKey, publicKey) { + index = j + found = true + break + } + } + if !found { + statuses = append(statuses, ðpbservice.DeletedKeystoreStatus{ + Status: ethpbservice.DeletedKeystoreStatus_NOT_FOUND, + }) + continue + } + deletedPublicKey := km.accountsStore.PublicKeys[index] + deletedKeys = append(deletedKeys, deletedPublicKey) + km.accountsStore.PrivateKeys = append(km.accountsStore.PrivateKeys[:index], km.accountsStore.PrivateKeys[index+1:]...) 
+ km.accountsStore.PublicKeys = append(km.accountsStore.PublicKeys[:index], km.accountsStore.PublicKeys[index+1:]...) + store, err = km.CreateAccountsKeystore(ctx, km.accountsStore.PrivateKeys, km.accountsStore.PublicKeys) + if err != nil { + return nil, errors.Wrap(err, "could not rewrite accounts keystore") + } + statuses = append(statuses, ðpbservice.DeletedKeystoreStatus{ + Status: ethpbservice.DeletedKeystoreStatus_DELETED, + }) + trackedPublicKeys[bytesutil.ToBytes48(publicKey)] = true + } + if len(deletedKeys) == 0 { + return statuses, nil + } + var deletedKeysStr string + for i, k := range deletedKeys { + if i == 0 { + deletedKeysStr += fmt.Sprintf("%#x", bytesutil.Trunc(k)) + } else if i == len(deletedKeys)-1 { + deletedKeysStr += fmt.Sprintf("%#x", bytesutil.Trunc(k)) + } else { + deletedKeysStr += fmt.Sprintf(",%#x", bytesutil.Trunc(k)) + } + } + log.WithFields(logrus.Fields{ + "publicKeys": deletedKeysStr, + }).Info("Successfully deleted validator key(s)") + + // Write the encoded keystore. 
+ encoded, err := json.MarshalIndent(store, "", "\t") + if err != nil { + return nil, err + } + if err := km.wallet.WriteFileAtPath(ctx, AccountsPath, AccountsKeystoreFileName, encoded); err != nil { + return nil, errors.Wrap(err, "could not write keystore file for accounts") + } + err = km.initializeKeysCachesFromKeystore() + if err != nil { + return nil, errors.Wrap(err, "failed to initialize key caches") + } + return statuses, nil +} diff --git a/validator/keymanager/imported/delete_test.go b/validator/keymanager/imported/delete_test.go new file mode 100644 index 0000000000..534725aee3 --- /dev/null +++ b/validator/keymanager/imported/delete_test.go @@ -0,0 +1,126 @@ +package imported + +import ( + "context" + "encoding/json" + "fmt" + "strings" + "testing" + + "github.com/prysmaticlabs/prysm/encoding/bytesutil" + ethpbservice "github.com/prysmaticlabs/prysm/proto/eth/service" + "github.com/prysmaticlabs/prysm/testing/require" + mock "github.com/prysmaticlabs/prysm/validator/accounts/testing" + "github.com/prysmaticlabs/prysm/validator/keymanager" + logTest "github.com/sirupsen/logrus/hooks/test" + keystorev4 "github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4" +) + +func TestImportedKeymanager_DeleteKeystores(t *testing.T) { + hook := logTest.NewGlobal() + wallet := &mock.Wallet{ + Files: make(map[string]map[string][]byte), + WalletPassword: password, + } + dr := &Keymanager{ + wallet: wallet, + accountsStore: &accountStore{}, + } + numAccounts := 5 + ctx := context.Background() + keystores := make([]*keymanager.Keystore, numAccounts) + for i := 0; i < numAccounts; i++ { + keystores[i] = createRandomKeystore(t, password) + } + require.NoError(t, dr.ImportKeystores(ctx, keystores, password)) + accounts, err := dr.FetchValidatingPublicKeys(ctx) + require.NoError(t, err) + require.Equal(t, numAccounts, len(accounts)) + + t.Run("keys not found", func(t *testing.T) { + notFoundPubKey := [48]byte{1, 2, 3} + notFoundPubKey2 := [48]byte{4, 5, 6} + statuses, err 
:= dr.DeleteKeystores(ctx, [][]byte{notFoundPubKey[:], notFoundPubKey2[:]}) + require.NoError(t, err) + require.Equal(t, 2, len(statuses)) + require.Equal(t, ethpbservice.DeletedKeystoreStatus_NOT_FOUND, statuses[0].Status) + require.Equal(t, ethpbservice.DeletedKeystoreStatus_NOT_FOUND, statuses[1].Status) + }) + t.Run("deletes properly", func(t *testing.T) { + accountToRemove := uint64(2) + accountPubKey := accounts[accountToRemove] + statuses, err := dr.DeleteKeystores(ctx, [][]byte{accountPubKey[:]}) + require.NoError(t, err) + + require.Equal(t, 1, len(statuses)) + require.Equal(t, ethpbservice.DeletedKeystoreStatus_DELETED, statuses[0].Status) + + // Ensure the keystore file was written to the wallet + // and ensure we can decrypt it using the EIP-2335 standard. + var encodedKeystore []byte + for k, v := range wallet.Files[AccountsPath] { + if strings.Contains(k, "keystore") { + encodedKeystore = v + } + } + require.NotNil(t, encodedKeystore, "could not find keystore file") + keystoreFile := &keymanager.Keystore{} + require.NoError(t, json.Unmarshal(encodedKeystore, keystoreFile)) + + // We extract the accounts from the keystore. + decryptor := keystorev4.New() + encodedAccounts, err := decryptor.Decrypt(keystoreFile.Crypto, password) + require.NoError(t, err, "Could not decrypt validator accounts") + store := &accountStore{} + require.NoError(t, json.Unmarshal(encodedAccounts, store)) + + require.Equal(t, numAccounts-1, len(store.PublicKeys)) + require.Equal(t, numAccounts-1, len(store.PrivateKeys)) + require.LogsContain(t, hook, fmt.Sprintf("%#x", bytesutil.Trunc(accountPubKey[:]))) + require.LogsContain(t, hook, "Successfully deleted validator key(s)") + }) + t.Run("returns NOT_ACTIVE status for duplicate public key in request", func(t *testing.T) { + accountToRemove := uint64(3) + accountPubKey := accounts[accountToRemove] + statuses, err := dr.DeleteKeystores(ctx, [][]byte{ + accountPubKey[:], + accountPubKey[:], // Add in the same key a few more times. 
+ accountPubKey[:], + accountPubKey[:], + }) + require.NoError(t, err) + + require.Equal(t, 4, len(statuses)) + for i, st := range statuses { + if i == 0 { + require.Equal(t, ethpbservice.DeletedKeystoreStatus_DELETED, st.Status) + } else { + require.Equal(t, ethpbservice.DeletedKeystoreStatus_NOT_ACTIVE, st.Status) + } + } + + // Ensure the keystore file was written to the wallet + // and ensure we can decrypt it using the EIP-2335 standard. + var encodedKeystore []byte + for k, v := range wallet.Files[AccountsPath] { + if strings.Contains(k, "keystore") { + encodedKeystore = v + } + } + require.NotNil(t, encodedKeystore, "could not find keystore file") + keystoreFile := &keymanager.Keystore{} + require.NoError(t, json.Unmarshal(encodedKeystore, keystoreFile)) + + // We extract the accounts from the keystore. + decryptor := keystorev4.New() + encodedAccounts, err := decryptor.Decrypt(keystoreFile.Crypto, password) + require.NoError(t, err, "Could not decrypt validator accounts") + store := &accountStore{} + require.NoError(t, json.Unmarshal(encodedAccounts, store)) + + require.Equal(t, numAccounts-2, len(store.PublicKeys)) + require.Equal(t, numAccounts-2, len(store.PrivateKeys)) + require.LogsContain(t, hook, fmt.Sprintf("%#x", bytesutil.Trunc(accountPubKey[:]))) + require.LogsContain(t, hook, "Successfully deleted validator key(s)") + }) +} diff --git a/validator/keymanager/imported/keymanager.go b/validator/keymanager/imported/keymanager.go index 970a5010a1..57fa4ff7ec 100644 --- a/validator/keymanager/imported/keymanager.go +++ b/validator/keymanager/imported/keymanager.go @@ -1,7 +1,6 @@ package imported import ( - "bytes" "context" "encoding/json" "fmt" @@ -18,7 +17,6 @@ import ( "github.com/prysmaticlabs/prysm/validator/accounts/iface" "github.com/prysmaticlabs/prysm/validator/accounts/petnames" "github.com/prysmaticlabs/prysm/validator/keymanager" - "github.com/sirupsen/logrus" keystorev4 "github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4" 
"go.opencensus.io/trace" ) @@ -158,52 +156,6 @@ func (km *Keymanager) initializeKeysCachesFromKeystore() error { return nil } -// DeleteAccounts takes in public keys and removes the accounts entirely. This includes their disk keystore and cached keystore. -func (km *Keymanager) DeleteAccounts(ctx context.Context, publicKeys [][]byte) error { - for _, publicKey := range publicKeys { - var index int - var found bool - for i, pubKey := range km.accountsStore.PublicKeys { - if bytes.Equal(pubKey, publicKey) { - index = i - found = true - break - } - } - if !found { - return fmt.Errorf("could not find public key %#x", publicKey) - } - deletedPublicKey := km.accountsStore.PublicKeys[index] - accountName := petnames.DeterministicName(deletedPublicKey, "-") - km.accountsStore.PrivateKeys = append(km.accountsStore.PrivateKeys[:index], km.accountsStore.PrivateKeys[index+1:]...) - km.accountsStore.PublicKeys = append(km.accountsStore.PublicKeys[:index], km.accountsStore.PublicKeys[index+1:]...) - - newStore, err := km.CreateAccountsKeystore(ctx, km.accountsStore.PrivateKeys, km.accountsStore.PublicKeys) - if err != nil { - return errors.Wrap(err, "could not rewrite accounts keystore") - } - - // Write the encoded keystore. - encoded, err := json.MarshalIndent(newStore, "", "\t") - if err != nil { - return err - } - if err := km.wallet.WriteFileAtPath(ctx, AccountsPath, AccountsKeystoreFileName, encoded); err != nil { - return errors.Wrap(err, "could not write keystore file for accounts") - } - - log.WithFields(logrus.Fields{ - "name": accountName, - "publicKey": fmt.Sprintf("%#x", bytesutil.Trunc(deletedPublicKey)), - }).Info("Successfully deleted validator account") - err = km.initializeKeysCachesFromKeystore() - if err != nil { - return errors.Wrap(err, "failed to initialize keys caches") - } - } - return nil -} - // FetchValidatingPublicKeys fetches the list of active public keys from the imported account keystores. 
func (km *Keymanager) FetchValidatingPublicKeys(ctx context.Context) ([][48]byte, error) { ctx, span := trace.StartSpan(ctx, "keymanager.FetchValidatingPublicKeys") diff --git a/validator/keymanager/imported/keymanager_test.go b/validator/keymanager/imported/keymanager_test.go index 5add2eff47..194e11a0e5 100644 --- a/validator/keymanager/imported/keymanager_test.go +++ b/validator/keymanager/imported/keymanager_test.go @@ -3,7 +3,6 @@ package imported import ( "context" "encoding/json" - "fmt" "strings" "testing" @@ -14,60 +13,9 @@ import ( "github.com/prysmaticlabs/prysm/testing/require" mock "github.com/prysmaticlabs/prysm/validator/accounts/testing" "github.com/prysmaticlabs/prysm/validator/keymanager" - logTest "github.com/sirupsen/logrus/hooks/test" keystorev4 "github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4" ) -func TestImportedKeymanager_RemoveAccounts(t *testing.T) { - hook := logTest.NewGlobal() - wallet := &mock.Wallet{ - Files: make(map[string]map[string][]byte), - WalletPassword: password, - } - dr := &Keymanager{ - wallet: wallet, - accountsStore: &accountStore{}, - } - numAccounts := 5 - ctx := context.Background() - keystores := make([]*keymanager.Keystore, numAccounts) - for i := 0; i < numAccounts; i++ { - keystores[i] = createRandomKeystore(t, password) - } - require.NoError(t, dr.ImportKeystores(ctx, keystores, password)) - accounts, err := dr.FetchValidatingPublicKeys(ctx) - require.NoError(t, err) - require.Equal(t, numAccounts, len(accounts)) - - accountToRemove := uint64(2) - accountPubKey := accounts[accountToRemove] - // Remove an account from the keystore. - require.NoError(t, dr.DeleteAccounts(ctx, [][]byte{accountPubKey[:]})) - // Ensure the keystore file was written to the wallet - // and ensure we can decrypt it using the EIP-2335 standard. 
- var encodedKeystore []byte - for k, v := range wallet.Files[AccountsPath] { - if strings.Contains(k, "keystore") { - encodedKeystore = v - } - } - require.NotNil(t, encodedKeystore, "could not find keystore file") - keystoreFile := &keymanager.Keystore{} - require.NoError(t, json.Unmarshal(encodedKeystore, keystoreFile)) - - // We extract the accounts from the keystore. - decryptor := keystorev4.New() - encodedAccounts, err := decryptor.Decrypt(keystoreFile.Crypto, password) - require.NoError(t, err, "Could not decrypt validator accounts") - store := &accountStore{} - require.NoError(t, json.Unmarshal(encodedAccounts, store)) - - require.Equal(t, numAccounts-1, len(store.PublicKeys)) - require.Equal(t, numAccounts-1, len(store.PrivateKeys)) - require.LogsContain(t, hook, fmt.Sprintf("%#x", bytesutil.Trunc(accountPubKey[:]))) - require.LogsContain(t, hook, "Successfully deleted validator account") -} - func TestImportedKeymanager_FetchValidatingPublicKeys(t *testing.T) { wallet := &mock.Wallet{ Files: make(map[string]map[string][]byte), diff --git a/validator/keymanager/types.go b/validator/keymanager/types.go index e2bcb047fc..69fe29e392 100644 --- a/validator/keymanager/types.go +++ b/validator/keymanager/types.go @@ -6,16 +6,45 @@ import ( "github.com/prysmaticlabs/prysm/async/event" "github.com/prysmaticlabs/prysm/crypto/bls" + ethpbservice "github.com/prysmaticlabs/prysm/proto/eth/service" validatorpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/validator-client" ) // IKeymanager defines a general keymanager interface for Prysm wallets. type IKeymanager interface { - // FetchValidatingPublicKeys fetches the list of active public keys that should be used to validate with. + PublicKeysFetcher + Signer + KeyChangeSubscriber +} + +// KeysFetcher for validating private and public keys. 
+type KeysFetcher interface { + FetchValidatingPrivateKeys(ctx context.Context) ([][32]byte, error) + PublicKeysFetcher +} + +// PublicKeysFetcher for validating public keys. +type PublicKeysFetcher interface { FetchValidatingPublicKeys(ctx context.Context) ([][48]byte, error) - // Sign signs a message using a validator key. +} + +// Signer allows signing messages using a validator private key. +type Signer interface { Sign(context.Context, *validatorpb.SignRequest) (bls.Signature, error) - // SubscribeAccountChanges subscribes to changes made to the underlying keys. +} + +// Importer can import new keystores into the keymanager. +type Importer interface { + ImportKeystores(ctx context.Context, keystores []*Keystore, importsPassword string) error +} + +// Deleter can delete keystores from the keymanager. +type Deleter interface { + DeleteKeystores(ctx context.Context, publicKeys [][]byte) ([]*ethpbservice.DeletedKeystoreStatus, error) +} + +// KeyChangeSubscriber allows subscribing to changes made to the underlying keys. +type KeyChangeSubscriber interface { SubscribeAccountChanges(pubKeysChan chan [][48]byte) event.Subscription } diff --git a/validator/keymanager/types_test.go b/validator/keymanager/types_test.go index 315692086c..71e765875e 100644 --- a/validator/keymanager/types_test.go +++ b/validator/keymanager/types_test.go @@ -11,4 +11,11 @@ var ( _ = keymanager.IKeymanager(&imported.Keymanager{}) _ = keymanager.IKeymanager(&derived.Keymanager{}) _ = keymanager.IKeymanager(&remote.Keymanager{}) + + // More granular assertions. 
+ _ = keymanager.KeysFetcher(&imported.Keymanager{}) + _ = keymanager.KeysFetcher(&derived.Keymanager{}) + _ = keymanager.Importer(&imported.Keymanager{}) + _ = keymanager.Deleter(&imported.Keymanager{}) + _ = keymanager.Deleter(&derived.Keymanager{}) ) diff --git a/validator/rpc/BUILD.bazel b/validator/rpc/BUILD.bazel index 7480a7c684..f6b202a468 100644 --- a/validator/rpc/BUILD.bazel +++ b/validator/rpc/BUILD.bazel @@ -91,6 +91,7 @@ go_test( "//crypto/rand:go_default_library", "//encoding/bytesutil:go_default_library", "//io/file:go_default_library", + "//proto/eth/service:go_default_library", "//proto/prysm/v1alpha1:go_default_library", "//proto/prysm/v1alpha1/validator-client:go_default_library", "//testing/assert:go_default_library", diff --git a/validator/rpc/standard_api.go b/validator/rpc/standard_api.go index 1b92542a35..a5433eb5c7 100644 --- a/validator/rpc/standard_api.go +++ b/validator/rpc/standard_api.go @@ -2,18 +2,20 @@ package rpc import ( "context" + "encoding/json" "fmt" "github.com/golang/protobuf/ptypes/empty" ethpbservice "github.com/prysmaticlabs/prysm/proto/eth/service" "github.com/prysmaticlabs/prysm/validator/keymanager" "github.com/prysmaticlabs/prysm/validator/keymanager/derived" + slashingprotection "github.com/prysmaticlabs/prysm/validator/slashing-protection-history" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) // ListKeystores implements the standard validator key management API. -func (s Server) ListKeystores( +func (s *Server) ListKeystores( ctx context.Context, _ *empty.Empty, ) (*ethpbservice.ListKeystoresResponse, error) { if !s.walletInitialized { @@ -36,3 +38,41 @@ func (s Server) ListKeystores( Keystores: keystoreResponse, }, nil } + +// DeleteKeystores allows for deleting specified public keys from Prysm. 
+func (s *Server) DeleteKeystores( + ctx context.Context, req *ethpbservice.DeleteKeystoresRequest, +) (*ethpbservice.DeleteKeystoresResponse, error) { + if !s.walletInitialized { + return nil, status.Error(codes.Internal, "Wallet not ready") + } + deleter, ok := s.keymanager.(keymanager.Deleter) + if !ok { + return nil, status.Error(codes.Internal, "Keymanager kind cannot delete keys") + } + statuses, err := deleter.DeleteKeystores(ctx, req.PublicKeys) + if err != nil { + return nil, status.Errorf(codes.Internal, "Could not delete keys: %v", err) + } + keysToFilter := req.PublicKeys + exportedHistory, err := slashingprotection.ExportStandardProtectionJSON(ctx, s.valDB, keysToFilter...) + if err != nil { + return nil, status.Errorf( + codes.Internal, + "Could not export slashing protection history: %v", + err, + ) + } + jsonHist, err := json.Marshal(exportedHistory) + if err != nil { + return nil, status.Errorf( + codes.Internal, + "Could not export slashing protection history: %v", + err, + ) + } + return ðpbservice.DeleteKeystoresResponse{ + Statuses: statuses, + SlashingProtection: string(jsonHist), + }, nil +} diff --git a/validator/rpc/standard_api_test.go b/validator/rpc/standard_api_test.go index bdbdeb3840..de717e0d5c 100644 --- a/validator/rpc/standard_api_test.go +++ b/validator/rpc/standard_api_test.go @@ -2,17 +2,21 @@ package rpc import ( "context" + "encoding/json" "fmt" "testing" "github.com/golang/protobuf/ptypes/empty" + ethpbservice "github.com/prysmaticlabs/prysm/proto/eth/service" + validatorpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/validator-client" "github.com/prysmaticlabs/prysm/testing/require" "github.com/prysmaticlabs/prysm/validator/accounts" "github.com/prysmaticlabs/prysm/validator/accounts/iface" "github.com/prysmaticlabs/prysm/validator/accounts/wallet" + "github.com/prysmaticlabs/prysm/validator/db/kv" "github.com/prysmaticlabs/prysm/validator/keymanager" "github.com/prysmaticlabs/prysm/validator/keymanager/derived" - 
constant "github.com/prysmaticlabs/prysm/validator/testing" + mocks "github.com/prysmaticlabs/prysm/validator/testing" ) func TestServer_ListKeystores(t *testing.T) { @@ -52,7 +56,7 @@ func TestServer_ListKeystores(t *testing.T) { numAccounts := 50 dr, ok := km.(*derived.Keymanager) require.Equal(t, true, ok) - err = dr.RecoverAccountsFromMnemonic(ctx, constant.TestMnemonic, "", numAccounts) + err = dr.RecoverAccountsFromMnemonic(ctx, mocks.TestMnemonic, "", numAccounts) require.NoError(t, err) expectedKeys, err := dr.FetchValidatingPublicKeys(ctx) require.NoError(t, err) @@ -71,3 +75,99 @@ func TestServer_ListKeystores(t *testing.T) { } }) } + +func TestServer_DeleteKeystores(t *testing.T) { + ctx := context.Background() + t.Run("wallet not ready", func(t *testing.T) { + s := Server{} + _, err := s.DeleteKeystores(context.Background(), nil) + require.ErrorContains(t, "Wallet not ready", err) + }) + localWalletDir := setupWalletDir(t) + defaultWalletPath = localWalletDir + w, err := accounts.CreateWalletWithKeymanager(ctx, &accounts.CreateWalletConfig{ + WalletCfg: &wallet.Config{ + WalletDir: defaultWalletPath, + KeymanagerKind: keymanager.Derived, + WalletPassword: strongPass, + }, + SkipMnemonicConfirm: true, + }) + require.NoError(t, err) + km, err := w.InitializeKeymanager(ctx, iface.InitKeymanagerConfig{ListenForChanges: false}) + require.NoError(t, err) + + s := &Server{ + keymanager: km, + walletInitialized: true, + wallet: w, + } + numAccounts := 50 + dr, ok := km.(*derived.Keymanager) + require.Equal(t, true, ok) + err = dr.RecoverAccountsFromMnemonic(ctx, mocks.TestMnemonic, "", numAccounts) + require.NoError(t, err) + + publicKeys, err := km.FetchValidatingPublicKeys(ctx) + require.NoError(t, err) + require.Equal(t, numAccounts, len(publicKeys)) + + // Create a validator database. 
+ validatorDB, err := kv.NewKVStore(ctx, defaultWalletPath, &kv.Config{ + PubKeys: publicKeys, + }) + require.NoError(t, err) + s.valDB = validatorDB + + // Have to close it after import is done otherwise it complains db is not open. + defer func() { + require.NoError(t, validatorDB.Close()) + }() + + // Generate mock slashing history. + attestingHistory := make([][]*kv.AttestationRecord, 0) + proposalHistory := make([]kv.ProposalHistoryForPubkey, len(publicKeys)) + for i := 0; i < len(publicKeys); i++ { + proposalHistory[i].Proposals = make([]kv.Proposal, 0) + } + mockJSON, err := mocks.MockSlashingProtectionJSON(publicKeys, attestingHistory, proposalHistory) + require.NoError(t, err) + + // JSON encode the protection JSON and save it. + encoded, err := json.Marshal(mockJSON) + require.NoError(t, err) + + _, err = s.ImportSlashingProtection(ctx, &validatorpb.ImportSlashingProtectionRequest{ + SlashingProtectionJson: string(encoded), + }) + require.NoError(t, err) + rawPubKeys := make([][]byte, numAccounts) + for i := 0; i < numAccounts; i++ { + rawPubKeys[i] = publicKeys[i][:] + } + + // Deletes properly and returns slashing protection history. + resp, err := s.DeleteKeystores(ctx, ðpbservice.DeleteKeystoresRequest{ + PublicKeys: rawPubKeys, + }) + require.NoError(t, err) + require.Equal(t, numAccounts, len(resp.Statuses)) + for _, status := range resp.Statuses { + require.Equal(t, ethpbservice.DeletedKeystoreStatus_DELETED, status.Status) + } + publicKeys, err = km.FetchValidatingPublicKeys(ctx) + require.NoError(t, err) + require.Equal(t, 0, len(publicKeys)) + require.Equal(t, numAccounts, len(mockJSON.Data)) + + // Returns slashing protection history if already deleted. 
+ resp, err = s.DeleteKeystores(ctx, ðpbservice.DeleteKeystoresRequest{ + PublicKeys: rawPubKeys, + }) + require.NoError(t, err) + require.Equal(t, numAccounts, len(resp.Statuses)) + for _, status := range resp.Statuses { + require.Equal(t, ethpbservice.DeletedKeystoreStatus_NOT_FOUND, status.Status) + } + require.Equal(t, numAccounts, len(mockJSON.Data)) +} diff --git a/validator/slashing-protection-history/export.go b/validator/slashing-protection-history/export.go index 0f37245385..37f6a577b5 100644 --- a/validator/slashing-protection-history/export.go +++ b/validator/slashing-protection-history/export.go @@ -16,7 +16,15 @@ import ( // ExportStandardProtectionJSON extracts all slashing protection data from a validator database // and packages it into an EIP-3076 compliant, standard -func ExportStandardProtectionJSON(ctx context.Context, validatorDB db.Database) (*format.EIPSlashingProtectionFormat, error) { +func ExportStandardProtectionJSON( + ctx context.Context, + validatorDB db.Database, + keysToFilter ...[]byte, +) (*format.EIPSlashingProtectionFormat, error) { + keysFilterMap := make(map[string]bool, len(keysToFilter)) + for _, k := range keysToFilter { + keysFilterMap[string(k)] = true + } interchangeJSON := &format.EIPSlashingProtectionFormat{} genesisValidatorsRoot, err := validatorDB.GenesisValidatorsRoot(ctx) if err != nil { @@ -50,6 +58,9 @@ func ExportStandardProtectionJSON(ctx context.Context, validatorDB db.Database) len(proposedPublicKeys), "Extracting signed blocks by validator public key", ) for _, pubKey := range proposedPublicKeys { + if _, ok := keysFilterMap[string(pubKey[:])]; len(keysToFilter) > 0 && !ok { + continue + } pubKeyHex, err := pubKeyToHexString(pubKey[:]) if err != nil { return nil, errors.Wrap(err, "could not convert public key to hex string") @@ -73,6 +84,9 @@ func ExportStandardProtectionJSON(ctx context.Context, validatorDB db.Database) len(attestedPublicKeys), "Extracting signed attestations by validator public key", ) 
for _, pubKey := range attestedPublicKeys { + if _, ok := keysFilterMap[string(pubKey[:])]; len(keysToFilter) > 0 && !ok { + continue + } pubKeyHex, err := pubKeyToHexString(pubKey[:]) if err != nil { return nil, errors.Wrap(err, "could not convert public key to hex string") diff --git a/validator/slashing-protection-history/round_trip_test.go b/validator/slashing-protection-history/round_trip_test.go index 3ca5a0aa2f..f782f90a91 100644 --- a/validator/slashing-protection-history/round_trip_test.go +++ b/validator/slashing-protection-history/round_trip_test.go @@ -128,6 +128,43 @@ func TestImportExport_RoundTrip_SkippedAttestationEpochs(t *testing.T) { require.DeepEqual(t, wanted.Data, eipStandard.Data) } +func TestImportExport_FilterKeys(t *testing.T) { + ctx := context.Background() + numValidators := 10 + publicKeys, err := slashtest.CreateRandomPubKeys(numValidators) + require.NoError(t, err) + validatorDB := dbtest.SetupDB(t, publicKeys) + + // First we setup some mock attesting and proposal histories and create a mock + // standard slashing protection format JSON struct. + attestingHistory, proposalHistory := slashtest.MockAttestingAndProposalHistories(publicKeys) + require.NoError(t, err) + wanted, err := slashtest.MockSlashingProtectionJSON(publicKeys, attestingHistory, proposalHistory) + require.NoError(t, err) + + // We encode the standard slashing protection struct into a JSON format. + blob, err := json.Marshal(wanted) + require.NoError(t, err) + buf := bytes.NewBuffer(blob) + + // Next, we attempt to import it into our validator database. + err = history.ImportStandardProtectionJSON(ctx, validatorDB, buf) + require.NoError(t, err) + + // Next up, we export our slashing protection database into the EIP standard file. + // Next, we attempt to import it into our validator database. 
+ rawKeys := make([][]byte, 5) + for i := 0; i < len(rawKeys); i++ { + rawKeys[i] = publicKeys[i][:] + } + eipStandard, err := history.ExportStandardProtectionJSON(ctx, validatorDB, rawKeys...) + require.NoError(t, err) + + // We compare the metadata fields from import to export. + require.Equal(t, wanted.Metadata, eipStandard.Metadata) + require.Equal(t, len(rawKeys), len(eipStandard.Data)) +} + func TestImportInterchangeData_OK(t *testing.T) { ctx := context.Background() numValidators := 10 From 0ade1f121d59eb05b50799c37b673908b4fc381e Mon Sep 17 00:00:00 2001 From: Nishant Das Date: Fri, 19 Nov 2021 20:01:15 +0800 Subject: [PATCH 09/45] Add Balance Field Trie (#9793) * save stuff * fix in v1 * clean up more * fix bugs * add comments and clean up * add flag + test * add tests * fmt * radek's review * gaz * kasey's review * gaz and new conditional * improve naming --- beacon-chain/state/fieldtrie/BUILD.bazel | 3 + beacon-chain/state/fieldtrie/field_trie.go | 60 +++++-- .../state/fieldtrie/field_trie_helpers.go | 153 +++++++++++++++++- beacon-chain/state/fieldtrie/helpers_test.go | 64 +++++++- beacon-chain/state/stateutil/BUILD.bazel | 2 - beacon-chain/state/stateutil/array_root.go | 35 ---- .../state/stateutil/validator_root.go | 40 ----- .../state/stateutil/validator_root_test.go | 15 -- beacon-chain/state/types/BUILD.bazel | 5 +- beacon-chain/state/types/types.go | 16 ++ beacon-chain/state/v1/BUILD.bazel | 1 + beacon-chain/state/v1/setters_misc.go | 5 + beacon-chain/state/v1/setters_validator.go | 4 + beacon-chain/state/v1/state_test.go | 90 +++++++++++ beacon-chain/state/v1/state_trie.go | 15 ++ beacon-chain/state/v1/state_trie_test.go | 7 + beacon-chain/state/v1/types.go | 2 +- beacon-chain/state/v2/BUILD.bazel | 2 + beacon-chain/state/v2/setters_misc.go | 5 + beacon-chain/state/v2/setters_test.go | 102 ++++++++++++ beacon-chain/state/v2/setters_validator.go | 4 + beacon-chain/state/v2/state_trie.go | 15 ++ beacon-chain/state/v2/state_trie_test.go | 7 + 
beacon-chain/state/v2/types.go | 3 + config/features/config.go | 5 + config/features/flags.go | 6 + encoding/ssz/helpers.go | 48 ++++++ encoding/ssz/helpers_test.go | 16 ++ .../altair/epoch_processing/BUILD.bazel | 6 +- .../epoch_processing/epoch_processing_test.go | 13 ++ .../mainnet/altair/random/BUILD.bazel | 5 +- .../mainnet/altair/random/random_test.go | 7 + .../mainnet/altair/rewards/BUILD.bazel | 5 +- .../mainnet/altair/rewards/rewards_test.go | 7 + .../mainnet/altair/sanity/BUILD.bazel | 6 +- .../mainnet/altair/sanity/sanity_test.go | 13 ++ .../phase0/epoch_processing/BUILD.bazel | 1 + .../epoch_processing/epoch_processing_test.go | 3 + .../mainnet/phase0/random/BUILD.bazel | 5 +- .../mainnet/phase0/random/random_test.go | 7 + .../mainnet/phase0/rewards/BUILD.bazel | 5 +- .../mainnet/phase0/rewards/rewards_test.go | 7 + .../mainnet/phase0/sanity/BUILD.bazel | 6 +- .../mainnet/phase0/sanity/sanity_test.go | 13 ++ 44 files changed, 717 insertions(+), 122 deletions(-) delete mode 100644 beacon-chain/state/stateutil/array_root.go delete mode 100644 beacon-chain/state/stateutil/validator_root_test.go create mode 100644 testing/spectest/mainnet/altair/epoch_processing/epoch_processing_test.go create mode 100644 testing/spectest/mainnet/altair/sanity/sanity_test.go create mode 100644 testing/spectest/mainnet/phase0/sanity/sanity_test.go diff --git a/beacon-chain/state/fieldtrie/BUILD.bazel b/beacon-chain/state/fieldtrie/BUILD.bazel index 180065daab..fcea708041 100644 --- a/beacon-chain/state/fieldtrie/BUILD.bazel +++ b/beacon-chain/state/fieldtrie/BUILD.bazel @@ -12,6 +12,8 @@ go_library( "//beacon-chain/state/stateutil:go_default_library", "//beacon-chain/state/types:go_default_library", "//crypto/hash:go_default_library", + "//encoding/bytesutil:go_default_library", + "//encoding/ssz:go_default_library", "//proto/prysm/v1alpha1:go_default_library", "//runtime/version:go_default_library", "@com_github_pkg_errors//:go_default_library", @@ -26,6 +28,7 @@ go_test( 
], embed = [":go_default_library"], deps = [ + "//beacon-chain/state/stateutil:go_default_library", "//beacon-chain/state/types:go_default_library", "//beacon-chain/state/v1:go_default_library", "//config/params:go_default_library", diff --git a/beacon-chain/state/fieldtrie/field_trie.go b/beacon-chain/state/fieldtrie/field_trie.go index f002dc6ed5..4f441668b7 100644 --- a/beacon-chain/state/fieldtrie/field_trie.go +++ b/beacon-chain/state/fieldtrie/field_trie.go @@ -18,6 +18,7 @@ type FieldTrie struct { field types.FieldIndex dataType types.DataType length uint64 + numOfElems int } // NewFieldTrie is the constructor for the field trie data structure. It creates the corresponding @@ -26,18 +27,19 @@ type FieldTrie struct { func NewFieldTrie(field types.FieldIndex, dataType types.DataType, elements interface{}, length uint64) (*FieldTrie, error) { if elements == nil { return &FieldTrie{ - field: field, - dataType: dataType, - reference: stateutil.NewRef(1), - RWMutex: new(sync.RWMutex), - length: length, + field: field, + dataType: dataType, + reference: stateutil.NewRef(1), + RWMutex: new(sync.RWMutex), + length: length, + numOfElems: 0, }, nil } fieldRoots, err := fieldConverters(field, []uint64{}, elements, true) if err != nil { return nil, err } - if err := validateElements(field, elements, length); err != nil { + if err := validateElements(field, dataType, elements, length); err != nil { return nil, err } switch dataType { @@ -53,8 +55,9 @@ func NewFieldTrie(field types.FieldIndex, dataType types.DataType, elements inte reference: stateutil.NewRef(1), RWMutex: new(sync.RWMutex), length: length, + numOfElems: reflect.ValueOf(elements).Len(), }, nil - case types.CompositeArray: + case types.CompositeArray, types.CompressedArray: return &FieldTrie{ fieldLayers: stateutil.ReturnTrieLayerVariable(fieldRoots, length), field: field, @@ -62,6 +65,7 @@ func NewFieldTrie(field types.FieldIndex, dataType types.DataType, elements inte reference: stateutil.NewRef(1), 
RWMutex: new(sync.RWMutex), length: length, + numOfElems: reflect.ValueOf(elements).Len(), }, nil default: return nil, errors.Errorf("unrecognized data type in field map: %v", reflect.TypeOf(dataType).Name()) @@ -92,13 +96,40 @@ func (f *FieldTrie) RecomputeTrie(indices []uint64, elements interface{}) ([32]b if err != nil { return [32]byte{}, err } + f.numOfElems = reflect.ValueOf(elements).Len() return fieldRoot, nil case types.CompositeArray: fieldRoot, f.fieldLayers, err = stateutil.RecomputeFromLayerVariable(fieldRoots, indices, f.fieldLayers) if err != nil { return [32]byte{}, err } + f.numOfElems = reflect.ValueOf(elements).Len() return stateutil.AddInMixin(fieldRoot, uint64(len(f.fieldLayers[0]))) + case types.CompressedArray: + numOfElems, err := f.field.ElemsInChunk() + if err != nil { + return [32]byte{}, err + } + // We remove the duplicates here in order to prevent + // duplicated insertions into the trie. + newIndices := []uint64{} + indexExists := make(map[uint64]bool) + newRoots := make([][32]byte, 0, len(fieldRoots)/int(numOfElems)) + for i, idx := range indices { + startIdx := idx / numOfElems + if indexExists[startIdx] { + continue + } + newIndices = append(newIndices, startIdx) + indexExists[startIdx] = true + newRoots = append(newRoots, fieldRoots[i]) + } + fieldRoot, f.fieldLayers, err = stateutil.RecomputeFromLayerVariable(newRoots, newIndices, f.fieldLayers) + if err != nil { + return [32]byte{}, err + } + f.numOfElems = reflect.ValueOf(elements).Len() + return stateutil.AddInMixin(fieldRoot, uint64(f.numOfElems)) default: return [32]byte{}, errors.Errorf("unrecognized data type in field map: %v", reflect.TypeOf(f.dataType).Name()) } @@ -109,11 +140,12 @@ func (f *FieldTrie) RecomputeTrie(indices []uint64, elements interface{}) ([32]b func (f *FieldTrie) CopyTrie() *FieldTrie { if f.fieldLayers == nil { return &FieldTrie{ - field: f.field, - dataType: f.dataType, - reference: stateutil.NewRef(1), - RWMutex: new(sync.RWMutex), - length: 
f.length, + field: f.field, + dataType: f.dataType, + reference: stateutil.NewRef(1), + RWMutex: new(sync.RWMutex), + length: f.length, + numOfElems: f.numOfElems, } } dstFieldTrie := make([][]*[32]byte, len(f.fieldLayers)) @@ -128,6 +160,7 @@ func (f *FieldTrie) CopyTrie() *FieldTrie { reference: stateutil.NewRef(1), RWMutex: new(sync.RWMutex), length: f.length, + numOfElems: f.numOfElems, } } @@ -139,6 +172,9 @@ func (f *FieldTrie) TrieRoot() ([32]byte, error) { case types.CompositeArray: trieRoot := *f.fieldLayers[len(f.fieldLayers)-1][0] return stateutil.AddInMixin(trieRoot, uint64(len(f.fieldLayers[0]))) + case types.CompressedArray: + trieRoot := *f.fieldLayers[len(f.fieldLayers)-1][0] + return stateutil.AddInMixin(trieRoot, uint64(f.numOfElems)) default: return [32]byte{}, errors.Errorf("unrecognized data type in field map: %v", reflect.TypeOf(f.dataType).Name()) } diff --git a/beacon-chain/state/fieldtrie/field_trie_helpers.go b/beacon-chain/state/fieldtrie/field_trie_helpers.go index 77aa4ac167..5a7c6b687e 100644 --- a/beacon-chain/state/fieldtrie/field_trie_helpers.go +++ b/beacon-chain/state/fieldtrie/field_trie_helpers.go @@ -1,6 +1,7 @@ package fieldtrie import ( + "encoding/binary" "fmt" "reflect" @@ -8,20 +9,37 @@ import ( "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil" "github.com/prysmaticlabs/prysm/beacon-chain/state/types" "github.com/prysmaticlabs/prysm/crypto/hash" + "github.com/prysmaticlabs/prysm/encoding/bytesutil" + "github.com/prysmaticlabs/prysm/encoding/ssz" ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" "github.com/prysmaticlabs/prysm/runtime/version" ) func (f *FieldTrie) validateIndices(idxs []uint64) error { + length := f.length + if f.dataType == types.CompressedArray { + comLength, err := f.field.ElemsInChunk() + if err != nil { + return err + } + length *= comLength + } for _, idx := range idxs { - if idx >= f.length { - return errors.Errorf("invalid index for field %s: %d >= length %d", 
f.field.String(version.Phase0), idx, f.length) + if idx >= length { + return errors.Errorf("invalid index for field %s: %d >= length %d", f.field.String(version.Phase0), idx, length) } } return nil } -func validateElements(field types.FieldIndex, elements interface{}, length uint64) error { +func validateElements(field types.FieldIndex, dataType types.DataType, elements interface{}, length uint64) error { + if dataType == types.CompressedArray { + comLength, err := field.ElemsInChunk() + if err != nil { + return err + } + length *= comLength + } val := reflect.ValueOf(elements) if val.Len() > int(length) { return errors.Errorf("elements length is larger than expected for field %s: %d > %d", field.String(version.Phase0), val.Len(), length) @@ -38,21 +56,21 @@ func fieldConverters(field types.FieldIndex, indices []uint64, elements interfac return nil, errors.Errorf("Wanted type of %v but got %v", reflect.TypeOf([][]byte{}).Name(), reflect.TypeOf(elements).Name()) } - return stateutil.HandleByteArrays(val, indices, convertAll) + return handleByteArrays(val, indices, convertAll) case types.Eth1DataVotes: val, ok := elements.([]*ethpb.Eth1Data) if !ok { return nil, errors.Errorf("Wanted type of %v but got %v", reflect.TypeOf([]*ethpb.Eth1Data{}).Name(), reflect.TypeOf(elements).Name()) } - return HandleEth1DataSlice(val, indices, convertAll) + return handleEth1DataSlice(val, indices, convertAll) case types.Validators: val, ok := elements.([]*ethpb.Validator) if !ok { return nil, errors.Errorf("Wanted type of %v but got %v", reflect.TypeOf([]*ethpb.Validator{}).Name(), reflect.TypeOf(elements).Name()) } - return stateutil.HandleValidatorSlice(val, indices, convertAll) + return handleValidatorSlice(val, indices, convertAll) case types.PreviousEpochAttestations, types.CurrentEpochAttestations: val, ok := elements.([]*ethpb.PendingAttestation) if !ok { @@ -60,13 +78,87 @@ func fieldConverters(field types.FieldIndex, indices []uint64, elements interfac 
reflect.TypeOf([]*ethpb.PendingAttestation{}).Name(), reflect.TypeOf(elements).Name()) } return handlePendingAttestation(val, indices, convertAll) + case types.Balances: + val, ok := elements.([]uint64) + if !ok { + return nil, errors.Errorf("Wanted type of %v but got %v", + reflect.TypeOf([]uint64{}).Name(), reflect.TypeOf(elements).Name()) + } + return handleBalanceSlice(val, indices, convertAll) default: return [][32]byte{}, errors.Errorf("got unsupported type of %v", reflect.TypeOf(elements).Name()) } } -// HandleEth1DataSlice processes a list of eth1data and indices into the appropriate roots. -func HandleEth1DataSlice(val []*ethpb.Eth1Data, indices []uint64, convertAll bool) ([][32]byte, error) { +// handleByteArrays computes and returns byte arrays in a slice of root format. +func handleByteArrays(val [][]byte, indices []uint64, convertAll bool) ([][32]byte, error) { + length := len(indices) + if convertAll { + length = len(val) + } + roots := make([][32]byte, 0, length) + rootCreator := func(input []byte) { + newRoot := bytesutil.ToBytes32(input) + roots = append(roots, newRoot) + } + if convertAll { + for i := range val { + rootCreator(val[i]) + } + return roots, nil + } + if len(val) > 0 { + for _, idx := range indices { + if idx > uint64(len(val))-1 { + return nil, fmt.Errorf("index %d greater than number of byte arrays %d", idx, len(val)) + } + rootCreator(val[idx]) + } + } + return roots, nil +} + +// handleValidatorSlice returns the validator indices in a slice of root format. 
+func handleValidatorSlice(val []*ethpb.Validator, indices []uint64, convertAll bool) ([][32]byte, error) { + length := len(indices) + if convertAll { + length = len(val) + } + roots := make([][32]byte, 0, length) + hasher := hash.CustomSHA256Hasher() + rootCreator := func(input *ethpb.Validator) error { + newRoot, err := stateutil.ValidatorRootWithHasher(hasher, input) + if err != nil { + return err + } + roots = append(roots, newRoot) + return nil + } + if convertAll { + for i := range val { + err := rootCreator(val[i]) + if err != nil { + return nil, err + } + } + return roots, nil + } + if len(val) > 0 { + for _, idx := range indices { + if idx > uint64(len(val))-1 { + return nil, fmt.Errorf("index %d greater than number of validators %d", idx, len(val)) + } + err := rootCreator(val[idx]) + if err != nil { + return nil, err + } + } + } + return roots, nil +} + +// handleEth1DataSlice processes a list of eth1data and indices into the appropriate roots. +func handleEth1DataSlice(val []*ethpb.Eth1Data, indices []uint64, convertAll bool) ([][32]byte, error) { length := len(indices) if convertAll { length = len(val) @@ -141,3 +233,48 @@ func handlePendingAttestation(val []*ethpb.PendingAttestation, indices []uint64, } return roots, nil } + +func handleBalanceSlice(val []uint64, indices []uint64, convertAll bool) ([][32]byte, error) { + if convertAll { + balancesMarshaling := make([][]byte, 0) + for _, b := range val { + balanceBuf := make([]byte, 8) + binary.LittleEndian.PutUint64(balanceBuf, b) + balancesMarshaling = append(balancesMarshaling, balanceBuf) + } + balancesChunks, err := ssz.PackByChunk(balancesMarshaling) + if err != nil { + return [][32]byte{}, errors.Wrap(err, "could not pack balances into chunks") + } + return balancesChunks, nil + } + if len(val) > 0 { + numOfElems, err := types.Balances.ElemsInChunk() + if err != nil { + return nil, err + } + roots := [][32]byte{} + for _, idx := range indices { + // We split the indexes into their relevant 
groups. Balances + // are compressed according to 4 values -> 1 chunk. + startIdx := idx / numOfElems + startGroup := startIdx * numOfElems + chunk := [32]byte{} + sizeOfElem := len(chunk) / int(numOfElems) + for i, j := 0, startGroup; j < startGroup+numOfElems; i, j = i+sizeOfElem, j+1 { + wantedVal := uint64(0) + // We are adding chunks in sets of 4, if the set is at the edge of the array + // then you will need to zero out the rest of the chunk. Ex : 41 indexes, + // so 41 % 4 = 1 . There are 3 indexes, which do not exist yet but we + // have to add in as a root. These 3 indexes are then given a 'zero' value. + if int(j) < len(val) { + wantedVal = val[j] + } + binary.LittleEndian.PutUint64(chunk[i:i+sizeOfElem], wantedVal) + } + roots = append(roots, chunk) + } + return roots, nil + } + return [][32]byte{}, nil +} diff --git a/beacon-chain/state/fieldtrie/helpers_test.go b/beacon-chain/state/fieldtrie/helpers_test.go index ec9cf5aae2..54666b872a 100644 --- a/beacon-chain/state/fieldtrie/helpers_test.go +++ b/beacon-chain/state/fieldtrie/helpers_test.go @@ -1,8 +1,13 @@ package fieldtrie import ( + "encoding/binary" + "sync" "testing" + "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil" + "github.com/prysmaticlabs/prysm/beacon-chain/state/types" + "github.com/prysmaticlabs/prysm/config/params" ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" "github.com/prysmaticlabs/prysm/testing/assert" ) @@ -17,7 +22,64 @@ func Test_handlePendingAttestation_OutOfRange(t *testing.T) { func Test_handleEth1DataSlice_OutOfRange(t *testing.T) { items := make([]*ethpb.Eth1Data, 1) indices := []uint64{3} - _, err := HandleEth1DataSlice(items, indices, false) + _, err := handleEth1DataSlice(items, indices, false) assert.ErrorContains(t, "index 3 greater than number of items in eth1 data slice 1", err) } + +func Test_handleValidatorSlice_OutOfRange(t *testing.T) { + vals := make([]*ethpb.Validator, 1) + indices := []uint64{3} + _, err := handleValidatorSlice(vals, 
indices, false) + assert.ErrorContains(t, "index 3 greater than number of validators 1", err) +} + +func TestBalancesSlice_CorrectRoots_All(t *testing.T) { + balances := []uint64{5, 2929, 34, 1291, 354305} + roots, err := handleBalanceSlice(balances, []uint64{}, true) + assert.NoError(t, err) + + root1 := [32]byte{} + binary.LittleEndian.PutUint64(root1[:8], balances[0]) + binary.LittleEndian.PutUint64(root1[8:16], balances[1]) + binary.LittleEndian.PutUint64(root1[16:24], balances[2]) + binary.LittleEndian.PutUint64(root1[24:32], balances[3]) + + root2 := [32]byte{} + binary.LittleEndian.PutUint64(root2[:8], balances[4]) + + assert.DeepEqual(t, roots, [][32]byte{root1, root2}) +} + +func TestBalancesSlice_CorrectRoots_Some(t *testing.T) { + balances := []uint64{5, 2929, 34, 1291, 354305} + roots, err := handleBalanceSlice(balances, []uint64{2, 3}, false) + assert.NoError(t, err) + + root1 := [32]byte{} + binary.LittleEndian.PutUint64(root1[:8], balances[0]) + binary.LittleEndian.PutUint64(root1[8:16], balances[1]) + binary.LittleEndian.PutUint64(root1[16:24], balances[2]) + binary.LittleEndian.PutUint64(root1[24:32], balances[3]) + + // Returns root for each indice(even if duplicated) + assert.DeepEqual(t, roots, [][32]byte{root1, root1}) +} + +func TestValidateIndices_CompressedField(t *testing.T) { + fakeTrie := &FieldTrie{ + RWMutex: new(sync.RWMutex), + reference: stateutil.NewRef(0), + fieldLayers: nil, + field: types.Balances, + dataType: types.CompressedArray, + length: params.BeaconConfig().ValidatorRegistryLimit / 4, + numOfElems: 0, + } + goodIdx := params.BeaconConfig().ValidatorRegistryLimit - 1 + assert.NoError(t, fakeTrie.validateIndices([]uint64{goodIdx})) + + badIdx := goodIdx + 1 + assert.ErrorContains(t, "invalid index for field balances", fakeTrie.validateIndices([]uint64{badIdx})) + +} diff --git a/beacon-chain/state/stateutil/BUILD.bazel b/beacon-chain/state/stateutil/BUILD.bazel index 8d9d2cc194..8b83cb2ca9 100644 --- 
a/beacon-chain/state/stateutil/BUILD.bazel +++ b/beacon-chain/state/stateutil/BUILD.bazel @@ -3,7 +3,6 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", srcs = [ - "array_root.go", "block_header_root.go", "eth1_root.go", "participation_bit_root.go", @@ -49,7 +48,6 @@ go_test( "state_root_test.go", "stateutil_test.go", "trie_helpers_test.go", - "validator_root_test.go", ], embed = [":go_default_library"], deps = [ diff --git a/beacon-chain/state/stateutil/array_root.go b/beacon-chain/state/stateutil/array_root.go deleted file mode 100644 index eb07514279..0000000000 --- a/beacon-chain/state/stateutil/array_root.go +++ /dev/null @@ -1,35 +0,0 @@ -package stateutil - -import ( - "fmt" - - "github.com/prysmaticlabs/prysm/encoding/bytesutil" -) - -// HandleByteArrays computes and returns byte arrays in a slice of root format. -func HandleByteArrays(val [][]byte, indices []uint64, convertAll bool) ([][32]byte, error) { - length := len(indices) - if convertAll { - length = len(val) - } - roots := make([][32]byte, 0, length) - rootCreator := func(input []byte) { - newRoot := bytesutil.ToBytes32(input) - roots = append(roots, newRoot) - } - if convertAll { - for i := range val { - rootCreator(val[i]) - } - return roots, nil - } - if len(val) > 0 { - for _, idx := range indices { - if idx > uint64(len(val))-1 { - return nil, fmt.Errorf("index %d greater than number of byte arrays %d", idx, len(val)) - } - rootCreator(val[idx]) - } - } - return roots, nil -} diff --git a/beacon-chain/state/stateutil/validator_root.go b/beacon-chain/state/stateutil/validator_root.go index 7564572447..e41fc0d6af 100644 --- a/beacon-chain/state/stateutil/validator_root.go +++ b/beacon-chain/state/stateutil/validator_root.go @@ -2,7 +2,6 @@ package stateutil import ( "encoding/binary" - "fmt" "github.com/pkg/errors" "github.com/prysmaticlabs/prysm/config/params" @@ -127,42 +126,3 @@ func ValidatorEncKey(validator *ethpb.Validator) []byte { 
return enc } - -// HandleValidatorSlice returns the validator indices in a slice of root format. -func HandleValidatorSlice(val []*ethpb.Validator, indices []uint64, convertAll bool) ([][32]byte, error) { - length := len(indices) - if convertAll { - length = len(val) - } - roots := make([][32]byte, 0, length) - hasher := hash.CustomSHA256Hasher() - rootCreator := func(input *ethpb.Validator) error { - newRoot, err := ValidatorRootWithHasher(hasher, input) - if err != nil { - return err - } - roots = append(roots, newRoot) - return nil - } - if convertAll { - for i := range val { - err := rootCreator(val[i]) - if err != nil { - return nil, err - } - } - return roots, nil - } - if len(val) > 0 { - for _, idx := range indices { - if idx > uint64(len(val))-1 { - return nil, fmt.Errorf("index %d greater than number of validators %d", idx, len(val)) - } - err := rootCreator(val[idx]) - if err != nil { - return nil, err - } - } - } - return roots, nil -} diff --git a/beacon-chain/state/stateutil/validator_root_test.go b/beacon-chain/state/stateutil/validator_root_test.go deleted file mode 100644 index 9526a062f2..0000000000 --- a/beacon-chain/state/stateutil/validator_root_test.go +++ /dev/null @@ -1,15 +0,0 @@ -package stateutil - -import ( - "testing" - - ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" - "github.com/prysmaticlabs/prysm/testing/assert" -) - -func Test_handleValidatorSlice_OutOfRange(t *testing.T) { - vals := make([]*ethpb.Validator, 1) - indices := []uint64{3} - _, err := HandleValidatorSlice(vals, indices, false) - assert.ErrorContains(t, "index 3 greater than number of validators 1", err) -} diff --git a/beacon-chain/state/types/BUILD.bazel b/beacon-chain/state/types/BUILD.bazel index 43ec2319b1..23a0642300 100644 --- a/beacon-chain/state/types/BUILD.bazel +++ b/beacon-chain/state/types/BUILD.bazel @@ -5,7 +5,10 @@ go_library( srcs = ["types.go"], importpath = "github.com/prysmaticlabs/prysm/beacon-chain/state/types", visibility = 
["//beacon-chain:__subpackages__"], - deps = ["//runtime/version:go_default_library"], + deps = [ + "//runtime/version:go_default_library", + "@com_github_pkg_errors//:go_default_library", + ], ) go_test( diff --git a/beacon-chain/state/types/types.go b/beacon-chain/state/types/types.go index 34feb537e3..53c0253471 100644 --- a/beacon-chain/state/types/types.go +++ b/beacon-chain/state/types/types.go @@ -1,6 +1,7 @@ package types import ( + "github.com/pkg/errors" "github.com/prysmaticlabs/prysm/runtime/version" ) @@ -18,6 +19,10 @@ const ( // CompositeArray represents a variable length array with // a non primitive type. CompositeArray + // CompressedArray represents a variable length array which + // can pack multiple elements into a leaf of the underlying + // trie. + CompressedArray ) // String returns the name of the field index. @@ -84,6 +89,17 @@ func (f FieldIndex) String(stateVersion int) string { } } +// ElemsInChunk returns the number of elements in the chunk (number of +// elements that are able to be packed). +func (f FieldIndex) ElemsInChunk() (uint64, error) { + switch f { + case Balances: + return 4, nil + default: + return 0, errors.Errorf("field %d doesn't support element compression", f) + } +} + // Below we define a set of useful enum values for the field // indices of the beacon state. For example, genesisTime is the // 0th field of the beacon state. 
This is helpful when we are diff --git a/beacon-chain/state/v1/BUILD.bazel b/beacon-chain/state/v1/BUILD.bazel index c0360749df..ca1220a9c5 100644 --- a/beacon-chain/state/v1/BUILD.bazel +++ b/beacon-chain/state/v1/BUILD.bazel @@ -87,6 +87,7 @@ go_test( "//beacon-chain/state:go_default_library", "//beacon-chain/state/stateutil:go_default_library", "//beacon-chain/state/types:go_default_library", + "//config/features:go_default_library", "//config/params:go_default_library", "//encoding/bytesutil:go_default_library", "//proto/prysm/v1alpha1:go_default_library", diff --git a/beacon-chain/state/v1/setters_misc.go b/beacon-chain/state/v1/setters_misc.go index f3cef51867..e6b89626b3 100644 --- a/beacon-chain/state/v1/setters_misc.go +++ b/beacon-chain/state/v1/setters_misc.go @@ -5,6 +5,7 @@ import ( types "github.com/prysmaticlabs/eth2-types" "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil" stateTypes "github.com/prysmaticlabs/prysm/beacon-chain/state/types" + "github.com/prysmaticlabs/prysm/config/features" "github.com/prysmaticlabs/prysm/crypto/hash" ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" "google.golang.org/protobuf/proto" @@ -172,6 +173,10 @@ func (b *BeaconState) addDirtyIndices(index stateTypes.FieldIndex, indices []uin if b.rebuildTrie[index] { return } + // Exit early if balance trie computation isn't enabled. 
+ if !features.Get().EnableBalanceTrieComputation && index == balances { + return + } totalIndicesLen := len(b.dirtyIndices[index]) + len(indices) if totalIndicesLen > indicesLimit { b.rebuildTrie[index] = true diff --git a/beacon-chain/state/v1/setters_validator.go b/beacon-chain/state/v1/setters_validator.go index e60b179e43..598c278408 100644 --- a/beacon-chain/state/v1/setters_validator.go +++ b/beacon-chain/state/v1/setters_validator.go @@ -103,6 +103,7 @@ func (b *BeaconState) SetBalances(val []uint64) error { b.state.Balances = val b.markFieldAsDirty(balances) + b.rebuildTrie[balances] = true return nil } @@ -128,6 +129,7 @@ func (b *BeaconState) UpdateBalancesAtIndex(idx types.ValidatorIndex, val uint64 bals[idx] = val b.state.Balances = bals b.markFieldAsDirty(balances) + b.addDirtyIndices(balances, []uint64{uint64(idx)}) return nil } @@ -219,6 +221,8 @@ func (b *BeaconState) AppendBalance(bal uint64) error { } b.state.Balances = append(bals, bal) + balIdx := len(b.state.Balances) - 1 b.markFieldAsDirty(balances) + b.addDirtyIndices(balances, []uint64{uint64(balIdx)}) return nil } diff --git a/beacon-chain/state/v1/state_test.go b/beacon-chain/state/v1/state_test.go index 4d8c90396f..20379ca0db 100644 --- a/beacon-chain/state/v1/state_test.go +++ b/beacon-chain/state/v1/state_test.go @@ -1,10 +1,13 @@ package v1 import ( + "context" "strconv" "sync" "testing" + types "github.com/prysmaticlabs/eth2-types" + "github.com/prysmaticlabs/go-bitfield" "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil" "github.com/prysmaticlabs/prysm/config/params" "github.com/prysmaticlabs/prysm/encoding/bytesutil" @@ -104,3 +107,90 @@ func TestStateTrie_IsNil(t *testing.T) { nonNilState := &BeaconState{state: ðpb.BeaconState{}} assert.Equal(t, false, nonNilState.IsNil()) } + +func TestBeaconState_AppendBalanceWithTrie(t *testing.T) { + count := uint64(100) + vals := make([]*ethpb.Validator, 0, count) + bals := make([]uint64, 0, count) + for i := uint64(1); i < count; 
i++ { + someRoot := [32]byte{} + someKey := [48]byte{} + copy(someRoot[:], strconv.Itoa(int(i))) + copy(someKey[:], strconv.Itoa(int(i))) + vals = append(vals, ðpb.Validator{ + PublicKey: someKey[:], + WithdrawalCredentials: someRoot[:], + EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance, + Slashed: false, + ActivationEligibilityEpoch: 1, + ActivationEpoch: 1, + ExitEpoch: 1, + WithdrawableEpoch: 1, + }) + bals = append(bals, params.BeaconConfig().MaxEffectiveBalance) + } + zeroHash := params.BeaconConfig().ZeroHash + mockblockRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot) + for i := 0; i < len(mockblockRoots); i++ { + mockblockRoots[i] = zeroHash[:] + } + + mockstateRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot) + for i := 0; i < len(mockstateRoots); i++ { + mockstateRoots[i] = zeroHash[:] + } + mockrandaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector) + for i := 0; i < len(mockrandaoMixes); i++ { + mockrandaoMixes[i] = zeroHash[:] + } + var pubKeys [][]byte + for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSize; i++ { + pubKeys = append(pubKeys, bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength)) + } + st, err := InitializeFromProto(ðpb.BeaconState{ + Slot: 1, + GenesisValidatorsRoot: make([]byte, 32), + Fork: ðpb.Fork{ + PreviousVersion: make([]byte, 4), + CurrentVersion: make([]byte, 4), + Epoch: 0, + }, + LatestBlockHeader: ðpb.BeaconBlockHeader{ + ParentRoot: make([]byte, 32), + StateRoot: make([]byte, 32), + BodyRoot: make([]byte, 32), + }, + Validators: vals, + Balances: bals, + Eth1Data: ðpb.Eth1Data{ + DepositRoot: make([]byte, 32), + BlockHash: make([]byte, 32), + }, + BlockRoots: mockblockRoots, + StateRoots: mockstateRoots, + RandaoMixes: mockrandaoMixes, + JustificationBits: bitfield.NewBitvector4(), + PreviousJustifiedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)}, + CurrentJustifiedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)}, + 
FinalizedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)}, + Slashings: make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector), + }) + assert.NoError(t, err) + _, err = st.HashTreeRoot(context.Background()) + assert.NoError(t, err) + + for i := 0; i < 100; i++ { + if i%2 == 0 { + assert.NoError(t, st.UpdateBalancesAtIndex(types.ValidatorIndex(i), 1000)) + } + if i%3 == 0 { + assert.NoError(t, st.AppendBalance(1000)) + } + } + _, err = st.HashTreeRoot(context.Background()) + assert.NoError(t, err) + newRt := bytesutil.ToBytes32(st.merkleLayers[0][balances]) + wantedRt, err := stateutil.Uint64ListRootWithRegistryLimit(st.state.Balances) + assert.NoError(t, err) + assert.Equal(t, wantedRt, newRt, "state roots are unequal") +} diff --git a/beacon-chain/state/v1/state_trie.go b/beacon-chain/state/v1/state_trie.go index 98731b0b5a..2c862ca034 100644 --- a/beacon-chain/state/v1/state_trie.go +++ b/beacon-chain/state/v1/state_trie.go @@ -12,6 +12,7 @@ import ( "github.com/prysmaticlabs/prysm/beacon-chain/state/fieldtrie" "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil" "github.com/prysmaticlabs/prysm/beacon-chain/state/types" + "github.com/prysmaticlabs/prysm/config/features" "github.com/prysmaticlabs/prysm/config/params" "github.com/prysmaticlabs/prysm/container/slice" "github.com/prysmaticlabs/prysm/crypto/hash" @@ -319,6 +320,20 @@ func (b *BeaconState) rootSelector(ctx context.Context, field types.FieldIndex) } return b.recomputeFieldTrie(validators, b.state.Validators) case balances: + if features.Get().EnableBalanceTrieComputation { + if b.rebuildTrie[field] { + maxBalCap := params.BeaconConfig().ValidatorRegistryLimit + elemSize := uint64(8) + balLimit := (maxBalCap*elemSize + 31) / 32 + err := b.resetFieldTrie(field, b.state.Balances, balLimit) + if err != nil { + return [32]byte{}, err + } + delete(b.rebuildTrie, field) + return b.stateFieldLeaves[field].TrieRoot() + } + return b.recomputeFieldTrie(balances, b.state.Balances) + } return 
stateutil.Uint64ListRootWithRegistryLimit(b.state.Balances) case randaoMixes: if b.rebuildTrie[field] { diff --git a/beacon-chain/state/v1/state_trie_test.go b/beacon-chain/state/v1/state_trie_test.go index 542eb1a163..d3a155c4f4 100644 --- a/beacon-chain/state/v1/state_trie_test.go +++ b/beacon-chain/state/v1/state_trie_test.go @@ -7,6 +7,7 @@ import ( "github.com/prysmaticlabs/prysm/beacon-chain/state" v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1" + "github.com/prysmaticlabs/prysm/config/features" "github.com/prysmaticlabs/prysm/config/params" "github.com/prysmaticlabs/prysm/encoding/bytesutil" eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" @@ -16,6 +17,12 @@ import ( "github.com/prysmaticlabs/prysm/testing/util" ) +func TestMain(m *testing.M) { + resetCfg := features.InitWithReset(&features.Flags{EnableBalanceTrieComputation: true}) + defer resetCfg() + m.Run() +} + func TestInitializeFromProto(t *testing.T) { testState, _ := util.DeterministicGenesisState(t, 64) pbState, err := v1.ProtobufBeaconState(testState.InnerStateUnsafe()) diff --git a/beacon-chain/state/v1/types.go b/beacon-chain/state/v1/types.go index efdfefb4b1..d9ba4b1e29 100644 --- a/beacon-chain/state/v1/types.go +++ b/beacon-chain/state/v1/types.go @@ -17,7 +17,6 @@ var _ state.BeaconState = (*BeaconState)(nil) func init() { fieldMap = make(map[types.FieldIndex]types.DataType, params.BeaconConfig().BeaconStateFieldCount) - // Initialize the fixed sized arrays. 
fieldMap[types.BlockRoots] = types.BasicArray fieldMap[types.StateRoots] = types.BasicArray @@ -28,6 +27,7 @@ func init() { fieldMap[types.Validators] = types.CompositeArray fieldMap[types.PreviousEpochAttestations] = types.CompositeArray fieldMap[types.CurrentEpochAttestations] = types.CompositeArray + fieldMap[types.Balances] = types.CompressedArray } // fieldMap keeps track of each field diff --git a/beacon-chain/state/v2/BUILD.bazel b/beacon-chain/state/v2/BUILD.bazel index 5c2995845f..063fbef75b 100644 --- a/beacon-chain/state/v2/BUILD.bazel +++ b/beacon-chain/state/v2/BUILD.bazel @@ -79,11 +79,13 @@ go_test( "//beacon-chain/state/stateutil:go_default_library", "//beacon-chain/state/types:go_default_library", "//beacon-chain/state/v1:go_default_library", + "//config/features:go_default_library", "//config/params:go_default_library", "//encoding/bytesutil:go_default_library", "//proto/prysm/v1alpha1:go_default_library", "//testing/assert:go_default_library", "//testing/require:go_default_library", "@com_github_prysmaticlabs_eth2_types//:go_default_library", + "@com_github_prysmaticlabs_go_bitfield//:go_default_library", ], ) diff --git a/beacon-chain/state/v2/setters_misc.go b/beacon-chain/state/v2/setters_misc.go index 5395bee2d4..87da6bcd30 100644 --- a/beacon-chain/state/v2/setters_misc.go +++ b/beacon-chain/state/v2/setters_misc.go @@ -5,6 +5,7 @@ import ( types "github.com/prysmaticlabs/eth2-types" "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil" stateTypes "github.com/prysmaticlabs/prysm/beacon-chain/state/types" + "github.com/prysmaticlabs/prysm/config/features" "github.com/prysmaticlabs/prysm/crypto/hash" ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" "google.golang.org/protobuf/proto" @@ -171,6 +172,10 @@ func (b *BeaconState) addDirtyIndices(index stateTypes.FieldIndex, indices []uin if b.rebuildTrie[index] { return } + // Exit early if balance trie computation isn't enabled. 
+ if !features.Get().EnableBalanceTrieComputation && index == balances { + return + } totalIndicesLen := len(b.dirtyIndices[index]) + len(indices) if totalIndicesLen > indicesLimit { b.rebuildTrie[index] = true diff --git a/beacon-chain/state/v2/setters_test.go b/beacon-chain/state/v2/setters_test.go index f05031c32d..82e692000b 100644 --- a/beacon-chain/state/v2/setters_test.go +++ b/beacon-chain/state/v2/setters_test.go @@ -2,10 +2,15 @@ package v2 import ( "context" + "strconv" "testing" + types "github.com/prysmaticlabs/eth2-types" + "github.com/prysmaticlabs/go-bitfield" + "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil" stateTypes "github.com/prysmaticlabs/prysm/beacon-chain/state/types" "github.com/prysmaticlabs/prysm/config/params" + "github.com/prysmaticlabs/prysm/encoding/bytesutil" eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" "github.com/prysmaticlabs/prysm/testing/assert" @@ -57,3 +62,100 @@ func TestAppendBeyondIndicesLimit(t *testing.T) { assert.Equal(t, true, st.rebuildTrie[validators]) assert.Equal(t, len(st.dirtyIndices[validators]), 0) } + +func TestBeaconState_AppendBalanceWithTrie(t *testing.T) { + count := uint64(100) + vals := make([]*ethpb.Validator, 0, count) + bals := make([]uint64, 0, count) + for i := uint64(1); i < count; i++ { + someRoot := [32]byte{} + someKey := [48]byte{} + copy(someRoot[:], strconv.Itoa(int(i))) + copy(someKey[:], strconv.Itoa(int(i))) + vals = append(vals, ðpb.Validator{ + PublicKey: someKey[:], + WithdrawalCredentials: someRoot[:], + EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance, + Slashed: false, + ActivationEligibilityEpoch: 1, + ActivationEpoch: 1, + ExitEpoch: 1, + WithdrawableEpoch: 1, + }) + bals = append(bals, params.BeaconConfig().MaxEffectiveBalance) + } + zeroHash := params.BeaconConfig().ZeroHash + mockblockRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot) + for i := 0; i < 
len(mockblockRoots); i++ { + mockblockRoots[i] = zeroHash[:] + } + + mockstateRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot) + for i := 0; i < len(mockstateRoots); i++ { + mockstateRoots[i] = zeroHash[:] + } + mockrandaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector) + for i := 0; i < len(mockrandaoMixes); i++ { + mockrandaoMixes[i] = zeroHash[:] + } + var pubKeys [][]byte + for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSize; i++ { + pubKeys = append(pubKeys, bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength)) + } + st, err := InitializeFromProto(ðpb.BeaconStateAltair{ + Slot: 1, + GenesisValidatorsRoot: make([]byte, 32), + Fork: ðpb.Fork{ + PreviousVersion: make([]byte, 4), + CurrentVersion: make([]byte, 4), + Epoch: 0, + }, + LatestBlockHeader: ðpb.BeaconBlockHeader{ + ParentRoot: make([]byte, 32), + StateRoot: make([]byte, 32), + BodyRoot: make([]byte, 32), + }, + CurrentEpochParticipation: []byte{}, + PreviousEpochParticipation: []byte{}, + Validators: vals, + Balances: bals, + Eth1Data: ð.Eth1Data{ + DepositRoot: make([]byte, 32), + BlockHash: make([]byte, 32), + }, + BlockRoots: mockblockRoots, + StateRoots: mockstateRoots, + RandaoMixes: mockrandaoMixes, + JustificationBits: bitfield.NewBitvector4(), + PreviousJustifiedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)}, + CurrentJustifiedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)}, + FinalizedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)}, + Slashings: make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector), + CurrentSyncCommittee: ðpb.SyncCommittee{ + Pubkeys: pubKeys, + AggregatePubkey: make([]byte, 48), + }, + NextSyncCommittee: ðpb.SyncCommittee{ + Pubkeys: pubKeys, + AggregatePubkey: make([]byte, 48), + }, + }) + assert.NoError(t, err) + _, err = st.HashTreeRoot(context.Background()) + assert.NoError(t, err) + + for i := 0; i < 100; i++ { + if i%2 == 0 { + assert.NoError(t, 
st.UpdateBalancesAtIndex(types.ValidatorIndex(i), 1000)) + } + if i%3 == 0 { + assert.NoError(t, st.AppendBalance(1000)) + } + } + _, err = st.HashTreeRoot(context.Background()) + assert.NoError(t, err) + newRt := bytesutil.ToBytes32(st.merkleLayers[0][balances]) + wantedRt, err := stateutil.Uint64ListRootWithRegistryLimit(st.state.Balances) + assert.NoError(t, err) + assert.Equal(t, wantedRt, newRt, "state roots are unequal") +} diff --git a/beacon-chain/state/v2/setters_validator.go b/beacon-chain/state/v2/setters_validator.go index f359a9d7e2..3299c09022 100644 --- a/beacon-chain/state/v2/setters_validator.go +++ b/beacon-chain/state/v2/setters_validator.go @@ -102,6 +102,7 @@ func (b *BeaconState) SetBalances(val []uint64) error { b.sharedFieldReferences[balances] = stateutil.NewRef(1) b.state.Balances = val + b.rebuildTrie[balances] = true b.markFieldAsDirty(balances) return nil } @@ -128,6 +129,7 @@ func (b *BeaconState) UpdateBalancesAtIndex(idx types.ValidatorIndex, val uint64 bals[idx] = val b.state.Balances = bals b.markFieldAsDirty(balances) + b.addDirtyIndices(balances, []uint64{uint64(idx)}) return nil } @@ -219,7 +221,9 @@ func (b *BeaconState) AppendBalance(bal uint64) error { } b.state.Balances = append(bals, bal) + balIdx := len(b.state.Balances) - 1 b.markFieldAsDirty(balances) + b.addDirtyIndices(balances, []uint64{uint64(balIdx)}) return nil } diff --git a/beacon-chain/state/v2/state_trie.go b/beacon-chain/state/v2/state_trie.go index 40a6cab31c..8015605ab0 100644 --- a/beacon-chain/state/v2/state_trie.go +++ b/beacon-chain/state/v2/state_trie.go @@ -12,6 +12,7 @@ import ( "github.com/prysmaticlabs/prysm/beacon-chain/state/fieldtrie" "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil" "github.com/prysmaticlabs/prysm/beacon-chain/state/types" + "github.com/prysmaticlabs/prysm/config/features" "github.com/prysmaticlabs/prysm/config/params" "github.com/prysmaticlabs/prysm/container/slice" "github.com/prysmaticlabs/prysm/crypto/hash" @@ 
-324,6 +325,20 @@ func (b *BeaconState) rootSelector(ctx context.Context, field types.FieldIndex) } return b.recomputeFieldTrie(validators, b.state.Validators) case balances: + if features.Get().EnableBalanceTrieComputation { + if b.rebuildTrie[field] { + maxBalCap := params.BeaconConfig().ValidatorRegistryLimit + elemSize := uint64(8) + balLimit := (maxBalCap*elemSize + 31) / 32 + err := b.resetFieldTrie(field, b.state.Balances, balLimit) + if err != nil { + return [32]byte{}, err + } + delete(b.rebuildTrie, field) + return b.stateFieldLeaves[field].TrieRoot() + } + return b.recomputeFieldTrie(balances, b.state.Balances) + } return stateutil.Uint64ListRootWithRegistryLimit(b.state.Balances) case randaoMixes: if b.rebuildTrie[field] { diff --git a/beacon-chain/state/v2/state_trie_test.go b/beacon-chain/state/v2/state_trie_test.go index 8f8508c70f..e0b5d295aa 100644 --- a/beacon-chain/state/v2/state_trie_test.go +++ b/beacon-chain/state/v2/state_trie_test.go @@ -6,6 +6,7 @@ import ( "testing" "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil" + "github.com/prysmaticlabs/prysm/config/features" "github.com/prysmaticlabs/prysm/config/params" "github.com/prysmaticlabs/prysm/encoding/bytesutil" ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" @@ -13,6 +14,12 @@ import ( "github.com/prysmaticlabs/prysm/testing/require" ) +func TestMain(m *testing.M) { + resetCfg := features.InitWithReset(&features.Flags{EnableBalanceTrieComputation: true}) + defer resetCfg() + m.Run() +} + func TestValidatorMap_DistinctCopy(t *testing.T) { count := uint64(100) vals := make([]*ethpb.Validator, 0, count) diff --git a/beacon-chain/state/v2/types.go b/beacon-chain/state/v2/types.go index aa70308ba6..193037e7ab 100644 --- a/beacon-chain/state/v2/types.go +++ b/beacon-chain/state/v2/types.go @@ -22,6 +22,9 @@ func init() { // Initialize the composite arrays. 
fieldMap[types.Eth1DataVotes] = types.CompositeArray fieldMap[types.Validators] = types.CompositeArray + + // Initialize Compressed Arrays + fieldMap[types.Balances] = types.CompressedArray } // fieldMap keeps track of each field diff --git a/config/features/config.go b/config/features/config.go index e86a178261..f531f860de 100644 --- a/config/features/config.go +++ b/config/features/config.go @@ -52,6 +52,7 @@ type Flags struct { EnableHistoricalSpaceRepresentation bool // EnableHistoricalSpaceRepresentation enables the saving of registry validators in separate buckets to save space EnableGetBlockOptimizations bool // EnableGetBlockOptimizations optimizes some elements of the GetBlock() function. EnableBatchVerification bool // EnableBatchVerification enables batch signature verification on gossip messages. + EnableBalanceTrieComputation bool // EnableBalanceTrieComputation enables our beacon state to use balance tries for hash tree root operations. // Logging related toggles. DisableGRPCConnectionLogs bool // Disables logging when a new grpc client has connected. @@ -223,6 +224,10 @@ func ConfigureBeaconChain(ctx *cli.Context) { logEnabled(enableBatchGossipVerification) cfg.EnableBatchVerification = true } + if ctx.Bool(enableBalanceTrieComputation.Name) { + logEnabled(enableBalanceTrieComputation) + cfg.EnableBalanceTrieComputation = true + } Init(cfg) } diff --git a/config/features/flags.go b/config/features/flags.go index 5cd136a303..2400bf7898 100644 --- a/config/features/flags.go +++ b/config/features/flags.go @@ -139,6 +139,10 @@ var ( Name: "enable-batch-gossip-verification", Usage: "This enables batch verification of signatures received over gossip.", } + enableBalanceTrieComputation = &cli.BoolFlag{ + Name: "enable-balance-trie-computation", + Usage: "This enables optimized hash tree root operations for our balance field.", + } ) // devModeFlags holds list of flags that are set when development mode is on. 
@@ -147,6 +151,7 @@ var devModeFlags = []cli.Flag{ forceOptMaxCoverAggregationStategy, enableGetBlockOptimizations, enableBatchGossipVerification, + enableBalanceTrieComputation, } // ValidatorFlags contains a list of all the feature flags that apply to the validator client. @@ -192,6 +197,7 @@ var BeaconChainFlags = append(deprecatedFlags, []cli.Flag{ disableCorrectlyPruneCanonicalAtts, disableActiveBalanceCache, enableBatchGossipVerification, + enableBalanceTrieComputation, }...) // E2EBeaconChainFlags contains a list of the beacon chain feature flags to be tested in E2E. diff --git a/encoding/ssz/helpers.go b/encoding/ssz/helpers.go index 04a1b69392..7532a7a89e 100644 --- a/encoding/ssz/helpers.go +++ b/encoding/ssz/helpers.go @@ -8,6 +8,7 @@ import ( "github.com/minio/sha256-simd" "github.com/pkg/errors" "github.com/prysmaticlabs/go-bitfield" + "github.com/prysmaticlabs/prysm/encoding/bytesutil" ) const bytesPerChunk = 32 @@ -113,6 +114,53 @@ func Pack(serializedItems [][]byte) ([][]byte, error) { return chunks, nil } +// PackByChunk a given byte array's final chunk with zeroes if needed. +func PackByChunk(serializedItems [][]byte) ([][bytesPerChunk]byte, error) { + emptyChunk := [bytesPerChunk]byte{} + // If there are no items, we return an empty chunk. + if len(serializedItems) == 0 { + return [][bytesPerChunk]byte{emptyChunk}, nil + } else if len(serializedItems[0]) == bytesPerChunk { + // If each item has exactly BYTES_PER_CHUNK length, we return the list of serialized items. + chunks := make([][bytesPerChunk]byte, 0, len(serializedItems)) + for _, c := range serializedItems { + chunks = append(chunks, bytesutil.ToBytes32(c)) + } + return chunks, nil + } + // We flatten the list in order to pack its items into byte chunks correctly. + var orderedItems []byte + for _, item := range serializedItems { + orderedItems = append(orderedItems, item...) + } + // If all our serialized item slices are length zero, we + // exit early. 
+ if len(orderedItems) == 0 { + return [][bytesPerChunk]byte{emptyChunk}, nil + } + numItems := len(orderedItems) + var chunks [][bytesPerChunk]byte + for i := 0; i < numItems; i += bytesPerChunk { + j := i + bytesPerChunk + // We create our upper bound index of the chunk, if it is greater than numItems, + // we set it as numItems itself. + if j > numItems { + j = numItems + } + // We create chunks from the list of items based on the + // indices determined above. + // Right-pad the last chunk with zero bytes if it does not + // have length bytesPerChunk from the helper. + // The ToBytes32 helper allocates a 32-byte array, before + // copying the ordered items in. This ensures that even if + // the last chunk is != 32 in length, we will right-pad it with + // zero bytes. + chunks = append(chunks, bytesutil.ToBytes32(orderedItems[i:j])) + } + + return chunks, nil +} + // MixInLength appends hash length to root func MixInLength(root [32]byte, length []byte) [32]byte { var hash [32]byte diff --git a/encoding/ssz/helpers_test.go b/encoding/ssz/helpers_test.go index bf0d8842b5..ce24f5e639 100644 --- a/encoding/ssz/helpers_test.go +++ b/encoding/ssz/helpers_test.go @@ -94,6 +94,22 @@ func TestPack(t *testing.T) { } } +func TestPackByChunk(t *testing.T) { + byteSlice2D := [][]byte{ + {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 2, 5, 2, 6, 2, 7}, + {1, 1, 2, 3, 5, 8, 13, 21, 34}, + } + expected := [][32]byte{{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 2, 5, 2, 6, 2, 7, 1, 1}, + {2, 3, 5, 8, 13, 21, 34, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}} + + result, err := ssz.PackByChunk(byteSlice2D) + require.NoError(t, err) + assert.Equal(t, len(expected), len(result)) + for i, v := range expected { + assert.DeepEqual(t, v, result[i]) + } +} + func TestMixInLength(t *testing.T) { byteSlice := [32]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 
18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32} length := []byte{1, 2, 3} diff --git a/testing/spectest/mainnet/altair/epoch_processing/BUILD.bazel b/testing/spectest/mainnet/altair/epoch_processing/BUILD.bazel index 838223c825..87b1b3f6bd 100644 --- a/testing/spectest/mainnet/altair/epoch_processing/BUILD.bazel +++ b/testing/spectest/mainnet/altair/epoch_processing/BUILD.bazel @@ -5,6 +5,7 @@ go_test( size = "small", srcs = [ "effective_balance_updates_test.go", + "epoch_processing_test.go", "eth1_data_reset_test.go", "historical_roots_update_test.go", "inactivity_updates_test.go", @@ -21,5 +22,8 @@ go_test( ], shard_count = 4, tags = ["spectest"], - deps = ["//testing/spectest/shared/altair/epoch_processing:go_default_library"], + deps = [ + "//config/features:go_default_library", + "//testing/spectest/shared/altair/epoch_processing:go_default_library", + ], ) diff --git a/testing/spectest/mainnet/altair/epoch_processing/epoch_processing_test.go b/testing/spectest/mainnet/altair/epoch_processing/epoch_processing_test.go new file mode 100644 index 0000000000..edb002ae95 --- /dev/null +++ b/testing/spectest/mainnet/altair/epoch_processing/epoch_processing_test.go @@ -0,0 +1,13 @@ +package epoch_processing + +import ( + "testing" + + "github.com/prysmaticlabs/prysm/config/features" +) + +func TestMain(m *testing.M) { + resetCfg := features.InitWithReset(&features.Flags{EnableBalanceTrieComputation: true}) + defer resetCfg() + m.Run() +} diff --git a/testing/spectest/mainnet/altair/random/BUILD.bazel b/testing/spectest/mainnet/altair/random/BUILD.bazel index 404d7434c5..9adfafd7bb 100644 --- a/testing/spectest/mainnet/altair/random/BUILD.bazel +++ b/testing/spectest/mainnet/altair/random/BUILD.bazel @@ -8,5 +8,8 @@ go_test( "@consensus_spec_tests_mainnet//:test_data", ], tags = ["spectest"], - deps = ["//testing/spectest/shared/altair/sanity:go_default_library"], + deps = [ + "//config/features:go_default_library", + 
"//testing/spectest/shared/altair/sanity:go_default_library", + ], ) diff --git a/testing/spectest/mainnet/altair/random/random_test.go b/testing/spectest/mainnet/altair/random/random_test.go index 04b41e2f74..5b60987541 100644 --- a/testing/spectest/mainnet/altair/random/random_test.go +++ b/testing/spectest/mainnet/altair/random/random_test.go @@ -3,9 +3,16 @@ package random import ( "testing" + "github.com/prysmaticlabs/prysm/config/features" "github.com/prysmaticlabs/prysm/testing/spectest/shared/altair/sanity" ) +func TestMain(m *testing.M) { + resetCfg := features.InitWithReset(&features.Flags{EnableBalanceTrieComputation: true}) + defer resetCfg() + m.Run() +} + func TestMainnet_Altair_Random(t *testing.T) { sanity.RunBlockProcessingTest(t, "mainnet", "random/random/pyspec_tests") } diff --git a/testing/spectest/mainnet/altair/rewards/BUILD.bazel b/testing/spectest/mainnet/altair/rewards/BUILD.bazel index 6abf6e69c5..ee4c80ff1f 100644 --- a/testing/spectest/mainnet/altair/rewards/BUILD.bazel +++ b/testing/spectest/mainnet/altair/rewards/BUILD.bazel @@ -8,5 +8,8 @@ go_test( "@consensus_spec_tests_mainnet//:test_data", ], tags = ["spectest"], - deps = ["//testing/spectest/shared/altair/rewards:go_default_library"], + deps = [ + "//config/features:go_default_library", + "//testing/spectest/shared/altair/rewards:go_default_library", + ], ) diff --git a/testing/spectest/mainnet/altair/rewards/rewards_test.go b/testing/spectest/mainnet/altair/rewards/rewards_test.go index baf2a8b4c5..0e9c3d95e9 100644 --- a/testing/spectest/mainnet/altair/rewards/rewards_test.go +++ b/testing/spectest/mainnet/altair/rewards/rewards_test.go @@ -3,9 +3,16 @@ package rewards import ( "testing" + "github.com/prysmaticlabs/prysm/config/features" "github.com/prysmaticlabs/prysm/testing/spectest/shared/altair/rewards" ) +func TestMain(m *testing.M) { + resetCfg := features.InitWithReset(&features.Flags{EnableBalanceTrieComputation: true}) + defer resetCfg() + m.Run() +} + func 
TestMainnet_Altair_Rewards(t *testing.T) { rewards.RunPrecomputeRewardsAndPenaltiesTests(t, "mainnet") } diff --git a/testing/spectest/mainnet/altair/sanity/BUILD.bazel b/testing/spectest/mainnet/altair/sanity/BUILD.bazel index d5ec7b405c..1b3e7b1280 100644 --- a/testing/spectest/mainnet/altair/sanity/BUILD.bazel +++ b/testing/spectest/mainnet/altair/sanity/BUILD.bazel @@ -5,11 +5,15 @@ go_test( size = "medium", srcs = [ "blocks_test.go", + "sanity_test.go", "slots_test.go", ], data = glob(["*.yaml"]) + [ "@consensus_spec_tests_mainnet//:test_data", ], tags = ["spectest"], - deps = ["//testing/spectest/shared/altair/sanity:go_default_library"], + deps = [ + "//config/features:go_default_library", + "//testing/spectest/shared/altair/sanity:go_default_library", + ], ) diff --git a/testing/spectest/mainnet/altair/sanity/sanity_test.go b/testing/spectest/mainnet/altair/sanity/sanity_test.go new file mode 100644 index 0000000000..469b52d3fc --- /dev/null +++ b/testing/spectest/mainnet/altair/sanity/sanity_test.go @@ -0,0 +1,13 @@ +package sanity + +import ( + "testing" + + "github.com/prysmaticlabs/prysm/config/features" +) + +func TestMain(m *testing.M) { + resetCfg := features.InitWithReset(&features.Flags{EnableBalanceTrieComputation: true}) + defer resetCfg() + m.Run() +} diff --git a/testing/spectest/mainnet/phase0/epoch_processing/BUILD.bazel b/testing/spectest/mainnet/phase0/epoch_processing/BUILD.bazel index 81dad5831b..fd31993271 100644 --- a/testing/spectest/mainnet/phase0/epoch_processing/BUILD.bazel +++ b/testing/spectest/mainnet/phase0/epoch_processing/BUILD.bazel @@ -22,6 +22,7 @@ go_test( shard_count = 4, tags = ["spectest"], deps = [ + "//config/features:go_default_library", "//config/params:go_default_library", "//testing/spectest/shared/phase0/epoch_processing:go_default_library", ], diff --git a/testing/spectest/mainnet/phase0/epoch_processing/epoch_processing_test.go b/testing/spectest/mainnet/phase0/epoch_processing/epoch_processing_test.go index 
b610764a1c..43a4d03a86 100644 --- a/testing/spectest/mainnet/phase0/epoch_processing/epoch_processing_test.go +++ b/testing/spectest/mainnet/phase0/epoch_processing/epoch_processing_test.go @@ -3,6 +3,7 @@ package epoch_processing import ( "testing" + "github.com/prysmaticlabs/prysm/config/features" "github.com/prysmaticlabs/prysm/config/params" ) @@ -12,6 +13,8 @@ func TestMain(m *testing.M) { c := params.BeaconConfig() c.MinGenesisActiveValidatorCount = 16384 params.OverrideBeaconConfig(c) + resetCfg := features.InitWithReset(&features.Flags{EnableBalanceTrieComputation: true}) + defer resetCfg() m.Run() } diff --git a/testing/spectest/mainnet/phase0/random/BUILD.bazel b/testing/spectest/mainnet/phase0/random/BUILD.bazel index bc450e49b3..07d61cc785 100644 --- a/testing/spectest/mainnet/phase0/random/BUILD.bazel +++ b/testing/spectest/mainnet/phase0/random/BUILD.bazel @@ -8,5 +8,8 @@ go_test( "@consensus_spec_tests_mainnet//:test_data", ], tags = ["spectest"], - deps = ["//testing/spectest/shared/phase0/sanity:go_default_library"], + deps = [ + "//config/features:go_default_library", + "//testing/spectest/shared/phase0/sanity:go_default_library", + ], ) diff --git a/testing/spectest/mainnet/phase0/random/random_test.go b/testing/spectest/mainnet/phase0/random/random_test.go index 609a7e18ed..486aa3cbe8 100644 --- a/testing/spectest/mainnet/phase0/random/random_test.go +++ b/testing/spectest/mainnet/phase0/random/random_test.go @@ -3,9 +3,16 @@ package random import ( "testing" + "github.com/prysmaticlabs/prysm/config/features" "github.com/prysmaticlabs/prysm/testing/spectest/shared/phase0/sanity" ) +func TestMain(m *testing.M) { + resetCfg := features.InitWithReset(&features.Flags{EnableBalanceTrieComputation: true}) + defer resetCfg() + m.Run() +} + func TestMainnet_Phase0_Random(t *testing.T) { sanity.RunBlockProcessingTest(t, "mainnet", "random/random/pyspec_tests") } diff --git a/testing/spectest/mainnet/phase0/rewards/BUILD.bazel 
b/testing/spectest/mainnet/phase0/rewards/BUILD.bazel index 91761b53a4..ac03295973 100644 --- a/testing/spectest/mainnet/phase0/rewards/BUILD.bazel +++ b/testing/spectest/mainnet/phase0/rewards/BUILD.bazel @@ -8,5 +8,8 @@ go_test( "@consensus_spec_tests_mainnet//:test_data", ], tags = ["spectest"], - deps = ["//testing/spectest/shared/phase0/rewards:go_default_library"], + deps = [ + "//config/features:go_default_library", + "//testing/spectest/shared/phase0/rewards:go_default_library", + ], ) diff --git a/testing/spectest/mainnet/phase0/rewards/rewards_test.go b/testing/spectest/mainnet/phase0/rewards/rewards_test.go index 0e1b4a4f69..d448bf829a 100644 --- a/testing/spectest/mainnet/phase0/rewards/rewards_test.go +++ b/testing/spectest/mainnet/phase0/rewards/rewards_test.go @@ -3,9 +3,16 @@ package rewards import ( "testing" + "github.com/prysmaticlabs/prysm/config/features" "github.com/prysmaticlabs/prysm/testing/spectest/shared/phase0/rewards" ) +func TestMain(m *testing.M) { + resetCfg := features.InitWithReset(&features.Flags{EnableBalanceTrieComputation: true}) + defer resetCfg() + m.Run() +} + func TestMainnet_Phase0_Rewards(t *testing.T) { rewards.RunPrecomputeRewardsAndPenaltiesTests(t, "mainnet") } diff --git a/testing/spectest/mainnet/phase0/sanity/BUILD.bazel b/testing/spectest/mainnet/phase0/sanity/BUILD.bazel index 8d43eb9041..f08fc64ccb 100644 --- a/testing/spectest/mainnet/phase0/sanity/BUILD.bazel +++ b/testing/spectest/mainnet/phase0/sanity/BUILD.bazel @@ -5,11 +5,15 @@ go_test( size = "medium", srcs = [ "blocks_test.go", + "sanity_test.go", "slots_test.go", ], data = glob(["*.yaml"]) + [ "@consensus_spec_tests_mainnet//:test_data", ], tags = ["spectest"], - deps = ["//testing/spectest/shared/phase0/sanity:go_default_library"], + deps = [ + "//config/features:go_default_library", + "//testing/spectest/shared/phase0/sanity:go_default_library", + ], ) diff --git a/testing/spectest/mainnet/phase0/sanity/sanity_test.go 
b/testing/spectest/mainnet/phase0/sanity/sanity_test.go new file mode 100644 index 0000000000..469b52d3fc --- /dev/null +++ b/testing/spectest/mainnet/phase0/sanity/sanity_test.go @@ -0,0 +1,13 @@ +package sanity + +import ( + "testing" + + "github.com/prysmaticlabs/prysm/config/features" +) + +func TestMain(m *testing.M) { + resetCfg := features.InitWithReset(&features.Flags{EnableBalanceTrieComputation: true}) + defer resetCfg() + m.Run() +} From 905e0f4c1cb277637910a12df2e2351eaf4b4a44 Mon Sep 17 00:00:00 2001 From: Potuz Date: Fri, 19 Nov 2021 12:34:28 -0300 Subject: [PATCH 10/45] Monitor metrics (#9921) Co-authored-by: Raul Jordan --- beacon-chain/monitor/BUILD.bazel | 3 + beacon-chain/monitor/metrics.go | 70 +++++++++++++++++++++ beacon-chain/monitor/process_attestation.go | 5 ++ beacon-chain/monitor/service.go | 5 -- 4 files changed, 78 insertions(+), 5 deletions(-) create mode 100644 beacon-chain/monitor/metrics.go diff --git a/beacon-chain/monitor/BUILD.bazel b/beacon-chain/monitor/BUILD.bazel index 8bdf75e6e6..b46e1ef767 100644 --- a/beacon-chain/monitor/BUILD.bazel +++ b/beacon-chain/monitor/BUILD.bazel @@ -4,6 +4,7 @@ go_library( name = "go_default_library", srcs = [ "doc.go", + "metrics.go", "process_attestation.go", "process_block.go", "process_exit.go", @@ -24,6 +25,8 @@ go_library( "//proto/prysm/v1alpha1/block:go_default_library", "//runtime/version:go_default_library", "//time/slots:go_default_library", + "@com_github_prometheus_client_golang//prometheus:go_default_library", + "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library", "@com_github_prysmaticlabs_eth2_types//:go_default_library", "@com_github_sirupsen_logrus//:go_default_library", ], diff --git a/beacon-chain/monitor/metrics.go b/beacon-chain/monitor/metrics.go new file mode 100644 index 0000000000..c9bab3532f --- /dev/null +++ b/beacon-chain/monitor/metrics.go @@ -0,0 +1,70 @@ +package monitor + +import ( + "github.com/prometheus/client_golang/prometheus" + 
"github.com/prometheus/client_golang/prometheus/promauto" + "github.com/sirupsen/logrus" +) + +var ( + log = logrus.WithField("prefix", "monitor") + // TODO: The Prometheus gauge vectors and counters in this package deprecate the + // corresponding gauge vectors and counters in the validator client. + + // inclusionSlotGauge used to track attestation inclusion distance + inclusionSlotGauge = promauto.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: "monitor", + Name: "inclusion_slot", + Help: "Attestations inclusion slot", + }, + []string{ + "validator_index", + }, + ) + // timelyHeadCounter used to track attestation timely head flags + timelyHeadCounter = promauto.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "monitor", + Name: "timely_head", + Help: "Attestation timely Head flag", + }, + []string{ + "validator_index", + }, + ) + // timelyTargetCounter used to track attestation timely head flags + timelyTargetCounter = promauto.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "monitor", + Name: "timely_target", + Help: "Attestation timely Target flag", + }, + []string{ + "validator_index", + }, + ) + // timelySourceCounter used to track attestation timely head flags + timelySourceCounter = promauto.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "monitor", + Name: "timely_source", + Help: "Attestation timely Source flag", + }, + []string{ + "validator_index", + }, + ) + + // aggregationCounter used to track aggregations + aggregationCounter = promauto.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "monitor", + Name: "aggregations", + Help: "Number of aggregation duties performed", + }, + []string{ + "validator_index", + }, + ) +) diff --git a/beacon-chain/monitor/process_attestation.go b/beacon-chain/monitor/process_attestation.go index c0aafc24f9..364bae5d79 100644 --- a/beacon-chain/monitor/process_attestation.go +++ b/beacon-chain/monitor/process_attestation.go @@ -92,6 +92,7 @@ func (s *Service) processIncludedAttestation(ctx 
context.Context, state state.Be latestPerf.balance = balance latestPerf.attestedSlot = att.Data.Slot latestPerf.inclusionSlot = state.Slot() + inclusionSlotGauge.WithLabelValues(fmt.Sprintf("%d", idx)).Set(float64(latestPerf.inclusionSlot)) aggregatedPerf.totalDistance += uint64(latestPerf.inclusionSlot - latestPerf.attestedSlot) if state.Version() == version.Altair { @@ -135,12 +136,15 @@ func (s *Service) processIncludedAttestation(ctx context.Context, state state.Be latestPerf.timelyTarget = hasFlag if latestPerf.timelySource { + timelySourceCounter.WithLabelValues(fmt.Sprintf("%d", idx)).Inc() aggregatedPerf.totalCorrectSource++ } if latestPerf.timelyHead { + timelyHeadCounter.WithLabelValues(fmt.Sprintf("%d", idx)).Inc() aggregatedPerf.totalCorrectHead++ } if latestPerf.timelyTarget { + timelyTargetCounter.WithLabelValues(fmt.Sprintf("%d", idx)).Inc() aggregatedPerf.totalCorrectTarget++ } } @@ -190,6 +194,7 @@ func (s *Service) processAggregatedAttestation(ctx context.Context, att *ethpb.A aggregatedPerf := s.aggregatedPerformance[att.AggregatorIndex] aggregatedPerf.totalAggregations++ s.aggregatedPerformance[att.AggregatorIndex] = aggregatedPerf + aggregationCounter.WithLabelValues(fmt.Sprintf("%d", att.AggregatorIndex)).Inc() } var root [32]byte diff --git a/beacon-chain/monitor/service.go b/beacon-chain/monitor/service.go index 10a78e7123..39746d43a1 100644 --- a/beacon-chain/monitor/service.go +++ b/beacon-chain/monitor/service.go @@ -3,11 +3,6 @@ package monitor import ( types "github.com/prysmaticlabs/eth2-types" "github.com/prysmaticlabs/prysm/beacon-chain/state/stategen" - "github.com/sirupsen/logrus" -) - -var ( - log = logrus.WithField("prefix", "monitor") ) // ValidatorLatestPerformance keeps track of the latest participation of the validator From 39c33b82ad22fcf39b5df4dbca78e178cee09de1 Mon Sep 17 00:00:00 2001 From: kasey <489222+kasey@users.noreply.github.com> Date: Fri, 19 Nov 2021 09:59:26 -0600 Subject: [PATCH 11/45] Switch to lazy state 
balance cache (#9822) * quick lazy balance cache proof of concept * WIP refactoring to use lazy cache * updating tests to use functional opts * updating the rest of the tests, all passing * use mock stategen where possible reduces the number of test cases that require db setup * rename test opt method for clear link * Update beacon-chain/blockchain/process_block.go Co-authored-by: terence tsao * test assumption that zerohash is in db * remove unused MockDB (mocking stategen instead) * fix cache bug, switch to sync.Mutex * improve test coverage for the state cache * uncomment failing genesis test for discussion * gofmt * remove unused Service struct member * cleanup unused func input * combining type declaration in signature * don't export the state cache constructor * work around blockchain deps w/ new file service_test brings in a ton of dependencies that make bazel rules for blockchain complex, so just sticking these mocks in their own file simplifies things. * gofmt * remove intentionally failing test this test established that the zero root can't be used to look up the state, resulting in a change in another PR to update stategen to use the GenesisState db method instead when the zero root is detected. 
* fixed error introduced by develop refresh * fix import ordering * appease deepsource * remove unused function * godoc comments on new requires/assert * defensive constructor per terence's PR comment * more differentiated balance cache metric names Co-authored-by: kasey Co-authored-by: terence tsao Co-authored-by: Raul Jordan Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com> --- beacon-chain/blockchain/BUILD.bazel | 3 + beacon-chain/blockchain/head.go | 55 ----- beacon-chain/blockchain/head_test.go | 6 +- beacon-chain/blockchain/info_test.go | 20 +- beacon-chain/blockchain/metrics.go | 8 + beacon-chain/blockchain/mock_test.go | 50 ++++ beacon-chain/blockchain/options.go | 7 + .../blockchain/process_attestation_test.go | 100 ++++---- beacon-chain/blockchain/process_block.go | 7 +- .../blockchain/process_block_helpers.go | 17 +- beacon-chain/blockchain/process_block_test.go | 183 +++++++------- .../blockchain/receive_attestation.go | 8 +- .../blockchain/receive_attestation_test.go | 26 +- beacon-chain/blockchain/receive_block_test.go | 80 +++---- beacon-chain/blockchain/service.go | 19 +- beacon-chain/blockchain/service_test.go | 31 ++- .../blockchain/state_balance_cache.go | 82 +++++++ .../blockchain/state_balance_cache_test.go | 225 ++++++++++++++++++ testing/assertions/assertions.go | 11 + testing/require/requires.go | 6 + 20 files changed, 627 insertions(+), 317 deletions(-) create mode 100644 beacon-chain/blockchain/mock_test.go create mode 100644 beacon-chain/blockchain/state_balance_cache.go create mode 100644 beacon-chain/blockchain/state_balance_cache_test.go diff --git a/beacon-chain/blockchain/BUILD.bazel b/beacon-chain/blockchain/BUILD.bazel index e54a755ee2..6f8d7e9b4a 100644 --- a/beacon-chain/blockchain/BUILD.bazel +++ b/beacon-chain/blockchain/BUILD.bazel @@ -18,6 +18,7 @@ go_library( "receive_attestation.go", "receive_block.go", "service.go", + "state_balance_cache.go", "weak_subjectivity_checks.go", 
], importpath = "github.com/prysmaticlabs/prysm/beacon-chain/blockchain", @@ -95,6 +96,7 @@ go_test( "init_test.go", "log_test.go", "metrics_test.go", + "mock_test.go", "process_attestation_test.go", "process_block_test.go", "receive_attestation_test.go", @@ -141,6 +143,7 @@ go_test( "chain_info_norace_test.go", "checktags_test.go", "init_test.go", + "mock_test.go", "receive_block_test.go", "service_norace_test.go", ], diff --git a/beacon-chain/blockchain/head.go b/beacon-chain/blockchain/head.go index a98a77f084..6a478c61a3 100644 --- a/beacon-chain/blockchain/head.go +++ b/beacon-chain/blockchain/head.go @@ -10,7 +10,6 @@ import ( "github.com/prysmaticlabs/prysm/beacon-chain/core/feed" statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state" "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" - "github.com/prysmaticlabs/prysm/beacon-chain/core/time" "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray" "github.com/prysmaticlabs/prysm/beacon-chain/state" "github.com/prysmaticlabs/prysm/config/features" @@ -42,9 +41,6 @@ func (s *Service) updateHead(ctx context.Context, balances []uint64) error { // ensure head gets its best justified info. if s.bestJustifiedCheckpt.Epoch > s.justifiedCheckpt.Epoch { s.justifiedCheckpt = s.bestJustifiedCheckpt - if err := s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)); err != nil { - return err - } } // Get head from the fork choice service. @@ -273,57 +269,6 @@ func (s *Service) hasHeadState() bool { return s.head != nil && s.head.state != nil } -// This caches justified state balances to be used for fork choice. 
-func (s *Service) cacheJustifiedStateBalances(ctx context.Context, justifiedRoot [32]byte) error { - if err := s.cfg.BeaconDB.SaveBlocks(ctx, s.getInitSyncBlocks()); err != nil { - return err - } - - s.clearInitSyncBlocks() - - var justifiedState state.BeaconState - var err error - if justifiedRoot == s.genesisRoot { - justifiedState, err = s.cfg.BeaconDB.GenesisState(ctx) - if err != nil { - return err - } - } else { - justifiedState, err = s.cfg.StateGen.StateByRoot(ctx, justifiedRoot) - if err != nil { - return err - } - } - if justifiedState == nil || justifiedState.IsNil() { - return errors.New("justified state can't be nil") - } - - epoch := time.CurrentEpoch(justifiedState) - - justifiedBalances := make([]uint64, justifiedState.NumValidators()) - if err := justifiedState.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error { - if helpers.IsActiveValidatorUsingTrie(val, epoch) { - justifiedBalances[idx] = val.EffectiveBalance() - } else { - justifiedBalances[idx] = 0 - } - return nil - }); err != nil { - return err - } - - s.justifiedBalancesLock.Lock() - defer s.justifiedBalancesLock.Unlock() - s.justifiedBalances = justifiedBalances - return nil -} - -func (s *Service) getJustifiedBalances() []uint64 { - s.justifiedBalancesLock.RLock() - defer s.justifiedBalancesLock.RUnlock() - return s.justifiedBalances -} - // Notifies a common event feed of a new chain head event. Called right after a new // chain head is determined, set, and saved to disk. 
func (s *Service) notifyNewHeadEvent( diff --git a/beacon-chain/blockchain/head_test.go b/beacon-chain/blockchain/head_test.go index 1a2e87e869..faf2ccb95c 100644 --- a/beacon-chain/blockchain/head_test.go +++ b/beacon-chain/blockchain/head_test.go @@ -123,13 +123,15 @@ func TestSaveHead_Different_Reorg(t *testing.T) { func TestCacheJustifiedStateBalances_CanCache(t *testing.T) { beaconDB := testDB.SetupDB(t) service := setupBeaconChain(t, beaconDB) + ctx := context.Background() state, _ := util.DeterministicGenesisState(t, 100) r := [32]byte{'a'} require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), ðpb.StateSummary{Root: r[:]})) require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), state, r)) - require.NoError(t, service.cacheJustifiedStateBalances(context.Background(), r)) - require.DeepEqual(t, service.getJustifiedBalances(), state.Balances(), "Incorrect justified balances") + balances, err := service.justifiedBalances.get(ctx, r) + require.NoError(t, err) + require.DeepEqual(t, balances, state.Balances(), "Incorrect justified balances") } func TestUpdateHead_MissingJustifiedRoot(t *testing.T) { diff --git a/beacon-chain/blockchain/info_test.go b/beacon-chain/blockchain/info_test.go index c8a5c88854..607dde95b9 100644 --- a/beacon-chain/blockchain/info_test.go +++ b/beacon-chain/blockchain/info_test.go @@ -25,18 +25,18 @@ func TestService_TreeHandler(t *testing.T) { headState, err := util.NewBeaconState() require.NoError(t, err) require.NoError(t, headState.SetBalances([]uint64{params.BeaconConfig().GweiPerEth})) - cfg := &config{ - BeaconDB: beaconDB, - ForkChoiceStore: protoarray.New( - 0, // justifiedEpoch - 0, // finalizedEpoch - [32]byte{'a'}, - ), - StateGen: stategen.New(beaconDB), + fcs := protoarray.New( + 0, // justifiedEpoch + 0, // finalizedEpoch + [32]byte{'a'}, + ) + opts := []Option{ + WithDatabase(beaconDB), + WithStateGen(stategen.New(beaconDB)), + WithForkChoiceStore(fcs), } - s, err := 
NewService(ctx) + s, err := NewService(ctx, opts...) require.NoError(t, err) - s.cfg = cfg require.NoError(t, s.cfg.ForkChoiceStore.ProcessBlock(ctx, 0, [32]byte{'a'}, [32]byte{'g'}, [32]byte{'c'}, 0, 0)) require.NoError(t, s.cfg.ForkChoiceStore.ProcessBlock(ctx, 1, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'c'}, 0, 0)) s.setHead([32]byte{'a'}, wrapper.WrappedPhase0SignedBeaconBlock(util.NewBeaconBlock()), headState) diff --git a/beacon-chain/blockchain/metrics.go b/beacon-chain/blockchain/metrics.go index c80a372713..e6a60c17ce 100644 --- a/beacon-chain/blockchain/metrics.go +++ b/beacon-chain/blockchain/metrics.go @@ -130,6 +130,14 @@ var ( Name: "sync_head_state_hit", Help: "The number of sync head state requests that are present in the cache.", }) + stateBalanceCacheHit = promauto.NewCounter(prometheus.CounterOpts{ + Name: "state_balance_cache_hit", + Help: "Count the number of state balance cache hits.", + }) + stateBalanceCacheMiss = promauto.NewCounter(prometheus.CounterOpts{ + Name: "state_balance_cache_miss", + Help: "Count the number of state balance cache hits.", + }) ) // reportSlotMetrics reports slot related metrics. 
diff --git a/beacon-chain/blockchain/mock_test.go b/beacon-chain/blockchain/mock_test.go new file mode 100644 index 0000000000..8a6cf27f76 --- /dev/null +++ b/beacon-chain/blockchain/mock_test.go @@ -0,0 +1,50 @@ +package blockchain + +import ( + "context" + "errors" + "testing" + + testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing" + "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray" + "github.com/prysmaticlabs/prysm/beacon-chain/state" + "github.com/prysmaticlabs/prysm/beacon-chain/state/stategen" +) + +func testServiceOptsWithDB(t *testing.T) []Option { + beaconDB := testDB.SetupDB(t) + fcs := protoarray.New(0, 0, [32]byte{'a'}) + return []Option{ + WithDatabase(beaconDB), + WithStateGen(stategen.New(beaconDB)), + WithForkChoiceStore(fcs), + } +} + +// warning: only use these opts when you are certain there are no db calls +// in your code path. this is a lightweight way to satisfy the stategen/beacondb +// initialization requirements w/o the overhead of db init. +func testServiceOptsNoDB() []Option { + return []Option{ + withStateBalanceCache(satisfactoryStateBalanceCache()), + } +} + +type mockStateByRooter struct { + state state.BeaconState + err error +} + +var _ stateByRooter = &mockStateByRooter{} + +func (m mockStateByRooter) StateByRoot(_ context.Context, _ [32]byte) (state.BeaconState, error) { + return m.state, m.err +} + +// returns an instance of the state balance cache that can be used +// to satisfy the requirement for one in NewService, but which will +// always return an error if used. 
+func satisfactoryStateBalanceCache() *stateBalanceCache { + err := errors.New("satisfactoryStateBalanceCache doesn't perform real caching") + return &stateBalanceCache{stateGen: mockStateByRooter{err: err}} +} diff --git a/beacon-chain/blockchain/options.go b/beacon-chain/blockchain/options.go index 2a63841f6f..abc604e64d 100644 --- a/beacon-chain/blockchain/options.go +++ b/beacon-chain/blockchain/options.go @@ -130,6 +130,13 @@ func WithSlasherAttestationsFeed(f *event.Feed) Option { } } +func withStateBalanceCache(c *stateBalanceCache) Option { + return func(s *Service) error { + s.justifiedBalances = c + return nil + } +} + // WithFinalizedStateAtStartUp to store finalized state at start up. func WithFinalizedStateAtStartUp(st state.BeaconState) Option { return func(s *Service) error { diff --git a/beacon-chain/blockchain/process_attestation_test.go b/beacon-chain/blockchain/process_attestation_test.go index bbe3d4b251..34c4db20d4 100644 --- a/beacon-chain/blockchain/process_attestation_test.go +++ b/beacon-chain/blockchain/process_attestation_test.go @@ -24,14 +24,13 @@ func TestStore_OnAttestation_ErrorConditions(t *testing.T) { ctx := context.Background() beaconDB := testDB.SetupDB(t) - cfg := &config{ - BeaconDB: beaconDB, - ForkChoiceStore: protoarray.New(0, 0, [32]byte{}), - StateGen: stategen.New(beaconDB), + opts := []Option{ + WithDatabase(beaconDB), + WithForkChoiceStore(protoarray.New(0, 0, [32]byte{})), + WithStateGen(stategen.New(beaconDB)), } - service, err := NewService(ctx) + service, err := NewService(ctx, opts...) 
require.NoError(t, err) - service.cfg = cfg _, err = blockTree1(t, beaconDB, []byte{'g'}) require.NoError(t, err) @@ -131,14 +130,14 @@ func TestStore_OnAttestation_Ok(t *testing.T) { ctx := context.Background() beaconDB := testDB.SetupDB(t) - cfg := &config{ - BeaconDB: beaconDB, - ForkChoiceStore: protoarray.New(0, 0, [32]byte{}), - StateGen: stategen.New(beaconDB), + fcs := protoarray.New(0, 0, [32]byte{'a'}) + opts := []Option{ + WithDatabase(beaconDB), + WithStateGen(stategen.New(beaconDB)), + WithForkChoiceStore(fcs), } - service, err := NewService(ctx) + service, err := NewService(ctx, opts...) require.NoError(t, err) - service.cfg = cfg genesisState, pks := util.DeterministicGenesisState(t, 64) require.NoError(t, genesisState.SetGenesisTime(uint64(time.Now().Unix())-params.BeaconConfig().SecondsPerSlot)) require.NoError(t, service.saveGenesisData(ctx, genesisState)) @@ -157,13 +156,12 @@ func TestStore_SaveCheckpointState(t *testing.T) { ctx := context.Background() beaconDB := testDB.SetupDB(t) - cfg := &config{ - BeaconDB: beaconDB, - StateGen: stategen.New(beaconDB), + opts := []Option{ + WithDatabase(beaconDB), + WithStateGen(stategen.New(beaconDB)), } - service, err := NewService(ctx) + service, err := NewService(ctx, opts...) require.NoError(t, err) - service.cfg = cfg s, err := util.NewBeaconState() require.NoError(t, err) @@ -230,13 +228,12 @@ func TestStore_UpdateCheckpointState(t *testing.T) { ctx := context.Background() beaconDB := testDB.SetupDB(t) - cfg := &config{ - BeaconDB: beaconDB, - StateGen: stategen.New(beaconDB), + opts := []Option{ + WithDatabase(beaconDB), + WithStateGen(stategen.New(beaconDB)), } - service, err := NewService(ctx) + service, err := NewService(ctx, opts...) 
require.NoError(t, err) - service.cfg = cfg epoch := types.Epoch(1) baseState, _ := util.DeterministicGenesisState(t, 1) @@ -268,12 +265,10 @@ func TestStore_UpdateCheckpointState(t *testing.T) { func TestAttEpoch_MatchPrevEpoch(t *testing.T) { ctx := context.Background() - beaconDB := testDB.SetupDB(t) - cfg := &config{BeaconDB: beaconDB} - service, err := NewService(ctx) + opts := testServiceOptsNoDB() + service, err := NewService(ctx, opts...) require.NoError(t, err) - service.cfg = cfg nowTime := uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot require.NoError(t, service.verifyAttTargetEpoch(ctx, 0, nowTime, ðpb.Checkpoint{Root: make([]byte, 32)})) @@ -281,12 +276,10 @@ func TestAttEpoch_MatchPrevEpoch(t *testing.T) { func TestAttEpoch_MatchCurrentEpoch(t *testing.T) { ctx := context.Background() - beaconDB := testDB.SetupDB(t) - cfg := &config{BeaconDB: beaconDB} - service, err := NewService(ctx) + opts := testServiceOptsNoDB() + service, err := NewService(ctx, opts...) require.NoError(t, err) - service.cfg = cfg nowTime := uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot require.NoError(t, service.verifyAttTargetEpoch(ctx, 0, nowTime, ðpb.Checkpoint{Epoch: 1})) @@ -294,12 +287,10 @@ func TestAttEpoch_MatchCurrentEpoch(t *testing.T) { func TestAttEpoch_NotMatch(t *testing.T) { ctx := context.Background() - beaconDB := testDB.SetupDB(t) - cfg := &config{BeaconDB: beaconDB} - service, err := NewService(ctx) + opts := testServiceOptsNoDB() + service, err := NewService(ctx, opts...) 
require.NoError(t, err) - service.cfg = cfg nowTime := 2 * uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().SecondsPerSlot err = service.verifyAttTargetEpoch(ctx, 0, nowTime, ðpb.Checkpoint{Root: make([]byte, 32)}) @@ -308,12 +299,9 @@ func TestAttEpoch_NotMatch(t *testing.T) { func TestVerifyBeaconBlock_NoBlock(t *testing.T) { ctx := context.Background() - beaconDB := testDB.SetupDB(t) - - cfg := &config{BeaconDB: beaconDB} - service, err := NewService(ctx) + opts := testServiceOptsWithDB(t) + service, err := NewService(ctx, opts...) require.NoError(t, err) - service.cfg = cfg d := util.HydrateAttestationData(ðpb.AttestationData{}) assert.ErrorContains(t, "signed beacon block can't be nil", service.verifyBeaconBlock(ctx, d)) @@ -321,12 +309,10 @@ func TestVerifyBeaconBlock_NoBlock(t *testing.T) { func TestVerifyBeaconBlock_futureBlock(t *testing.T) { ctx := context.Background() - beaconDB := testDB.SetupDB(t) - cfg := &config{BeaconDB: beaconDB} - service, err := NewService(ctx) + opts := testServiceOptsWithDB(t) + service, err := NewService(ctx, opts...) require.NoError(t, err) - service.cfg = cfg b := util.NewBeaconBlock() b.Block.Slot = 2 @@ -340,12 +326,10 @@ func TestVerifyBeaconBlock_futureBlock(t *testing.T) { func TestVerifyBeaconBlock_OK(t *testing.T) { ctx := context.Background() - beaconDB := testDB.SetupDB(t) - cfg := &config{BeaconDB: beaconDB} - service, err := NewService(ctx) + opts := testServiceOptsWithDB(t) + service, err := NewService(ctx, opts...) 
require.NoError(t, err) - service.cfg = cfg b := util.NewBeaconBlock() b.Block.Slot = 2 @@ -361,10 +345,14 @@ func TestVerifyFinalizedConsistency_InconsistentRoot(t *testing.T) { ctx := context.Background() beaconDB := testDB.SetupDB(t) - cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})} - service, err := NewService(ctx) + fcs := protoarray.New(0, 0, [32]byte{'a'}) + opts := []Option{ + WithDatabase(beaconDB), + WithStateGen(stategen.New(beaconDB)), + WithForkChoiceStore(fcs), + } + service, err := NewService(ctx, opts...) require.NoError(t, err) - service.cfg = cfg b32 := util.NewBeaconBlock() b32.Block.Slot = 32 @@ -387,12 +375,10 @@ func TestVerifyFinalizedConsistency_InconsistentRoot(t *testing.T) { func TestVerifyFinalizedConsistency_OK(t *testing.T) { ctx := context.Background() - beaconDB := testDB.SetupDB(t) - cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})} - service, err := NewService(ctx) + opts := testServiceOptsWithDB(t) + service, err := NewService(ctx, opts...) require.NoError(t, err) - service.cfg = cfg b32 := util.NewBeaconBlock() b32.Block.Slot = 32 @@ -415,12 +401,10 @@ func TestVerifyFinalizedConsistency_OK(t *testing.T) { func TestVerifyFinalizedConsistency_IsCanonical(t *testing.T) { ctx := context.Background() - beaconDB := testDB.SetupDB(t) - cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})} - service, err := NewService(ctx) + opts := testServiceOptsWithDB(t) + service, err := NewService(ctx, opts...) 
require.NoError(t, err) - service.cfg = cfg b32 := util.NewBeaconBlock() b32.Block.Slot = 32 diff --git a/beacon-chain/blockchain/process_block.go b/beacon-chain/blockchain/process_block.go index 364df7da23..1a26b9438a 100644 --- a/beacon-chain/blockchain/process_block.go +++ b/beacon-chain/blockchain/process_block.go @@ -151,7 +151,12 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b s.finalizedCheckpt = postState.FinalizedCheckpoint() } - if err := s.updateHead(ctx, s.getJustifiedBalances()); err != nil { + balances, err := s.justifiedBalances.get(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)) + if err != nil { + msg := fmt.Sprintf("could not read balances for state w/ justified checkpoint %#x", s.justifiedCheckpt.Root) + return errors.Wrap(err, msg) + } + if err := s.updateHead(ctx, balances); err != nil { log.WithError(err).Warn("Could not update head") } diff --git a/beacon-chain/blockchain/process_block_helpers.go b/beacon-chain/blockchain/process_block_helpers.go index 7fe3cecca8..be66f7f1c3 100644 --- a/beacon-chain/blockchain/process_block_helpers.go +++ b/beacon-chain/blockchain/process_block_helpers.go @@ -193,9 +193,6 @@ func (s *Service) updateJustified(ctx context.Context, state state.ReadOnlyBeaco if canUpdate { s.prevJustifiedCheckpt = s.justifiedCheckpt s.justifiedCheckpt = cpt - if err := s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)); err != nil { - return err - } } return nil @@ -206,12 +203,13 @@ func (s *Service) updateJustified(ctx context.Context, state state.ReadOnlyBeaco // This method does not have defense against fork choice bouncing attack, which is why it's only recommend to be used during initial syncing. 
func (s *Service) updateJustifiedInitSync(ctx context.Context, cp *ethpb.Checkpoint) error { s.prevJustifiedCheckpt = s.justifiedCheckpt - s.justifiedCheckpt = cp - if err := s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)); err != nil { + + if err := s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, cp); err != nil { return err } + s.justifiedCheckpt = cp - return s.cfg.BeaconDB.SaveJustifiedCheckpoint(ctx, cp) + return nil } func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) error { @@ -330,7 +328,9 @@ func (s *Service) finalizedImpliesNewJustified(ctx context.Context, state state. if !attestation.CheckPointIsEqual(s.justifiedCheckpt, state.CurrentJustifiedCheckpoint()) { if state.CurrentJustifiedCheckpoint().Epoch > s.justifiedCheckpt.Epoch { s.justifiedCheckpt = state.CurrentJustifiedCheckpoint() - return s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)) + // we don't need to check if the previous justified checkpoint was an ancestor since the new + // finalized checkpoint is overriding it. + return nil } // Update justified if store justified is not in chain with finalized check point. @@ -345,9 +345,6 @@ func (s *Service) finalizedImpliesNewJustified(ctx context.Context, state state. 
} if !bytes.Equal(anc, s.finalizedCheckpt.Root) { s.justifiedCheckpt = state.CurrentJustifiedCheckpoint() - if err := s.cacheJustifiedStateBalances(ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)); err != nil { - return err - } } } return nil diff --git a/beacon-chain/blockchain/process_block_test.go b/beacon-chain/blockchain/process_block_test.go index e42418e3f0..0d56be28d1 100644 --- a/beacon-chain/blockchain/process_block_test.go +++ b/beacon-chain/blockchain/process_block_test.go @@ -35,16 +35,17 @@ import ( func TestStore_OnBlock(t *testing.T) { ctx := context.Background() - beaconDB := testDB.SetupDB(t) - cfg := &config{ - BeaconDB: beaconDB, - StateGen: stategen.New(beaconDB), - ForkChoiceStore: protoarray.New(0, 0, [32]byte{}), + beaconDB := testDB.SetupDB(t) + fcs := protoarray.New(0, 0, [32]byte{'a'}) + opts := []Option{ + WithDatabase(beaconDB), + WithStateGen(stategen.New(beaconDB)), + WithForkChoiceStore(fcs), } - service, err := NewService(ctx) + + service, err := NewService(ctx, opts...) require.NoError(t, err) - service.cfg = cfg genesisStateRoot := [32]byte{} genesis := blocks.NewGenesisBlock(genesisStateRoot[:]) assert.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(genesis))) @@ -134,13 +135,12 @@ func TestStore_OnBlockBatch(t *testing.T) { ctx := context.Background() beaconDB := testDB.SetupDB(t) - cfg := &config{ - BeaconDB: beaconDB, - StateGen: stategen.New(beaconDB), + opts := []Option{ + WithDatabase(beaconDB), + WithStateGen(stategen.New(beaconDB)), } - service, err := NewService(ctx) + service, err := NewService(ctx, opts...) 
require.NoError(t, err) - service.cfg = cfg genesisStateRoot := [32]byte{} genesis := blocks.NewGenesisBlock(genesisStateRoot[:]) @@ -186,14 +186,12 @@ func TestStore_OnBlockBatch(t *testing.T) { func TestRemoveStateSinceLastFinalized_EmptyStartSlot(t *testing.T) { ctx := context.Background() - beaconDB := testDB.SetupDB(t) params.UseMinimalConfig() defer params.UseMainnetConfig() - cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})} - service, err := NewService(ctx) + opts := testServiceOptsWithDB(t) + service, err := NewService(ctx, opts...) require.NoError(t, err) - service.cfg = cfg service.genesisTime = time.Now() update, err := service.shouldUpdateCurrentJustified(ctx, ðpb.Checkpoint{Root: make([]byte, 32)}) @@ -221,16 +219,13 @@ func TestRemoveStateSinceLastFinalized_EmptyStartSlot(t *testing.T) { func TestShouldUpdateJustified_ReturnFalse(t *testing.T) { ctx := context.Background() - beaconDB := testDB.SetupDB(t) params.UseMinimalConfig() defer params.UseMainnetConfig() - cfg := &config{BeaconDB: beaconDB} - service, err := NewService(ctx) + opts := testServiceOptsWithDB(t) + service, err := NewService(ctx, opts...) require.NoError(t, err) - service.cfg = cfg service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{}) - lastJustifiedBlk := util.NewBeaconBlock() lastJustifiedBlk.Block.ParentRoot = bytesutil.PadTo([]byte{'G'}, 32) lastJustifiedRoot, err := lastJustifiedBlk.Block.HashTreeRoot() @@ -255,13 +250,12 @@ func TestCachedPreState_CanGetFromStateSummary(t *testing.T) { ctx := context.Background() beaconDB := testDB.SetupDB(t) - cfg := &config{ - BeaconDB: beaconDB, - StateGen: stategen.New(beaconDB), + opts := []Option{ + WithDatabase(beaconDB), + WithStateGen(stategen.New(beaconDB)), } - service, err := NewService(ctx) + service, err := NewService(ctx, opts...) 
require.NoError(t, err) - service.cfg = cfg s, err := v1.InitializeFromProto(ðpb.BeaconState{Slot: 1, GenesisValidatorsRoot: params.BeaconConfig().ZeroHash[:]}) require.NoError(t, err) @@ -289,13 +283,12 @@ func TestCachedPreState_CanGetFromDB(t *testing.T) { ctx := context.Background() beaconDB := testDB.SetupDB(t) - cfg := &config{ - BeaconDB: beaconDB, - StateGen: stategen.New(beaconDB), + opts := []Option{ + WithDatabase(beaconDB), + WithStateGen(stategen.New(beaconDB)), } - service, err := NewService(ctx) + service, err := NewService(ctx, opts...) require.NoError(t, err) - service.cfg = cfg genesisStateRoot := [32]byte{} genesis := blocks.NewGenesisBlock(genesisStateRoot[:]) @@ -327,10 +320,13 @@ func TestUpdateJustified_CouldUpdateBest(t *testing.T) { ctx := context.Background() beaconDB := testDB.SetupDB(t) - cfg := &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB), ForkChoiceStore: protoarray.New(0, 0, [32]byte{})} - service, err := NewService(ctx) + opts := []Option{ + WithDatabase(beaconDB), + WithStateGen(stategen.New(beaconDB)), + WithForkChoiceStore(protoarray.New(0, 0, [32]byte{})), + } + service, err := NewService(ctx, opts...) require.NoError(t, err) - service.cfg = cfg signedBlock := util.NewBeaconBlock() require.NoError(t, beaconDB.SaveBlock(ctx, wrapper.WrappedPhase0SignedBeaconBlock(signedBlock))) @@ -361,10 +357,12 @@ func TestFillForkChoiceMissingBlocks_CanSave(t *testing.T) { ctx := context.Background() beaconDB := testDB.SetupDB(t) - cfg := &config{BeaconDB: beaconDB} - service, err := NewService(ctx) + opts := []Option{ + WithDatabase(beaconDB), + WithStateGen(stategen.New(beaconDB)), + } + service, err := NewService(ctx, opts...) 
require.NoError(t, err) - service.cfg = cfg service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'}) service.finalizedCheckpt = ðpb.Checkpoint{Root: make([]byte, 32)} @@ -400,10 +398,12 @@ func TestFillForkChoiceMissingBlocks_RootsMatch(t *testing.T) { ctx := context.Background() beaconDB := testDB.SetupDB(t) - cfg := &config{BeaconDB: beaconDB} - service, err := NewService(ctx) + opts := []Option{ + WithDatabase(beaconDB), + WithStateGen(stategen.New(beaconDB)), + } + service, err := NewService(ctx, opts...) require.NoError(t, err) - service.cfg = cfg service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'}) service.finalizedCheckpt = ðpb.Checkpoint{Root: make([]byte, 32)} @@ -442,10 +442,12 @@ func TestFillForkChoiceMissingBlocks_FilterFinalized(t *testing.T) { ctx := context.Background() beaconDB := testDB.SetupDB(t) - cfg := &config{BeaconDB: beaconDB} - service, err := NewService(ctx) + opts := []Option{ + WithDatabase(beaconDB), + WithStateGen(stategen.New(beaconDB)), + } + service, err := NewService(ctx, opts...) require.NoError(t, err) - service.cfg = cfg service.cfg.ForkChoiceStore = protoarray.New(0, 0, [32]byte{'A'}) // Set finalized epoch to 1. service.finalizedCheckpt = ðpb.Checkpoint{Epoch: 1} @@ -586,7 +588,8 @@ func TestCurrentSlot_HandlesOverflow(t *testing.T) { } func TestAncestorByDB_CtxErr(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) - service, err := NewService(ctx) + opts := testServiceOptsWithDB(t) + service, err := NewService(ctx, opts...) 
require.NoError(t, err) cancel() @@ -598,10 +601,14 @@ func TestAncestor_HandleSkipSlot(t *testing.T) { ctx := context.Background() beaconDB := testDB.SetupDB(t) - cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})} - service, err := NewService(ctx) + fcs := protoarray.New(0, 0, [32]byte{'a'}) + opts := []Option{ + WithDatabase(beaconDB), + WithStateGen(stategen.New(beaconDB)), + WithForkChoiceStore(fcs), + } + service, err := NewService(ctx, opts...) require.NoError(t, err) - service.cfg = cfg b1 := util.NewBeaconBlock() b1.Block.Slot = 1 @@ -642,10 +649,9 @@ func TestAncestor_HandleSkipSlot(t *testing.T) { func TestAncestor_CanUseForkchoice(t *testing.T) { ctx := context.Background() - cfg := &config{ForkChoiceStore: protoarray.New(0, 0, [32]byte{})} - service, err := NewService(ctx) + opts := testServiceOptsWithDB(t) + service, err := NewService(ctx, opts...) require.NoError(t, err) - service.cfg = cfg b1 := util.NewBeaconBlock() b1.Block.Slot = 1 @@ -682,10 +688,14 @@ func TestAncestor_CanUseDB(t *testing.T) { ctx := context.Background() beaconDB := testDB.SetupDB(t) - cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})} - service, err := NewService(ctx) + fcs := protoarray.New(0, 0, [32]byte{'a'}) + opts := []Option{ + WithDatabase(beaconDB), + WithStateGen(stategen.New(beaconDB)), + WithForkChoiceStore(fcs), + } + service, err := NewService(ctx, opts...) require.NoError(t, err) - service.cfg = cfg b1 := util.NewBeaconBlock() b1.Block.Slot = 1 @@ -720,10 +730,9 @@ func TestAncestor_CanUseDB(t *testing.T) { func TestEnsureRootNotZeroHashes(t *testing.T) { ctx := context.Background() - cfg := &config{} - service, err := NewService(ctx) + opts := testServiceOptsNoDB() + service, err := NewService(ctx, opts...) 
require.NoError(t, err) - service.cfg = cfg service.genesisRoot = [32]byte{'a'} r := service.ensureRootNotZeros(params.BeaconConfig().ZeroHash) @@ -735,6 +744,12 @@ func TestEnsureRootNotZeroHashes(t *testing.T) { func TestFinalizedImpliesNewJustified(t *testing.T) { beaconDB := testDB.SetupDB(t) + fcs := protoarray.New(0, 0, [32]byte{'a'}) + opts := []Option{ + WithDatabase(beaconDB), + WithStateGen(stategen.New(beaconDB)), + WithForkChoiceStore(fcs), + } ctx := context.Background() type args struct { cachedCheckPoint *ethpb.Checkpoint @@ -776,9 +791,8 @@ func TestFinalizedImpliesNewJustified(t *testing.T) { beaconState, err := util.NewBeaconState() require.NoError(t, err) require.NoError(t, beaconState.SetCurrentJustifiedCheckpoint(test.args.stateCheckPoint)) - service, err := NewService(ctx) + service, err := NewService(ctx, opts...) require.NoError(t, err) - service.cfg = &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB), ForkChoiceStore: protoarray.New(0, 0, [32]byte{})} service.justifiedCheckpt = test.args.cachedCheckPoint require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, ðpb.StateSummary{Root: bytesutil.PadTo(test.want.Root, 32)})) genesisState, err := util.NewBeaconState() @@ -815,6 +829,12 @@ func TestVerifyBlkDescendant(t *testing.T) { beaconDB := testDB.SetupDB(t) ctx := context.Background() + fcs := protoarray.New(0, 0, [32]byte{'a'}) + opts := []Option{ + WithDatabase(beaconDB), + WithStateGen(stategen.New(beaconDB)), + WithForkChoiceStore(fcs), + } b := util.NewBeaconBlock() b.Block.Slot = 1 r, err := b.Block.HashTreeRoot() @@ -869,9 +889,8 @@ func TestVerifyBlkDescendant(t *testing.T) { }, } for _, tt := range tests { - service, err := NewService(ctx) + service, err := NewService(ctx, opts...) 
require.NoError(t, err) - service.cfg = &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB), ForkChoiceStore: protoarray.New(0, 0, [32]byte{})} service.finalizedCheckpt = ðpb.Checkpoint{ Root: tt.args.finalizedRoot[:], } @@ -885,12 +904,10 @@ func TestVerifyBlkDescendant(t *testing.T) { } func TestUpdateJustifiedInitSync(t *testing.T) { - beaconDB := testDB.SetupDB(t) ctx := context.Background() - cfg := &config{BeaconDB: beaconDB} - service, err := NewService(ctx) + opts := testServiceOptsWithDB(t) + service, err := NewService(ctx, opts...) require.NoError(t, err) - service.cfg = cfg gBlk := util.NewBeaconBlock() gRoot, err := gBlk.Block.HashTreeRoot() @@ -916,10 +933,9 @@ func TestUpdateJustifiedInitSync(t *testing.T) { func TestHandleEpochBoundary_BadMetrics(t *testing.T) { ctx := context.Background() - cfg := &config{} - service, err := NewService(ctx) + opts := testServiceOptsNoDB() + service, err := NewService(ctx, opts...) require.NoError(t, err) - service.cfg = cfg s, err := util.NewBeaconState() require.NoError(t, err) @@ -931,10 +947,9 @@ func TestHandleEpochBoundary_BadMetrics(t *testing.T) { func TestHandleEpochBoundary_UpdateFirstSlot(t *testing.T) { ctx := context.Background() - cfg := &config{} - service, err := NewService(ctx) + opts := testServiceOptsNoDB() + service, err := NewService(ctx, opts...) 
require.NoError(t, err) - service.cfg = cfg s, _ := util.DeterministicGenesisState(t, 1024) service.head = &head{state: s} @@ -946,18 +961,18 @@ func TestHandleEpochBoundary_UpdateFirstSlot(t *testing.T) { func TestOnBlock_CanFinalize(t *testing.T) { ctx := context.Background() beaconDB := testDB.SetupDB(t) + fcs := protoarray.New(0, 0, [32]byte{'a'}) depositCache, err := depositcache.New() require.NoError(t, err) - cfg := &config{ - BeaconDB: beaconDB, - StateGen: stategen.New(beaconDB), - ForkChoiceStore: protoarray.New(0, 0, [32]byte{}), - DepositCache: depositCache, - StateNotifier: &mock.MockStateNotifier{}, + opts := []Option{ + WithDatabase(beaconDB), + WithStateGen(stategen.New(beaconDB)), + WithForkChoiceStore(fcs), + WithDepositCache(depositCache), + WithStateNotifier(&mock.MockStateNotifier{}), } - service, err := NewService(ctx) + service, err := NewService(ctx, opts...) require.NoError(t, err) - service.cfg = cfg gs, keys := util.DeterministicGenesisState(t, 32) require.NoError(t, service.saveGenesisData(ctx, gs)) @@ -991,18 +1006,12 @@ func TestOnBlock_CanFinalize(t *testing.T) { func TestInsertFinalizedDeposits(t *testing.T) { ctx := context.Background() - beaconDB := testDB.SetupDB(t) + opts := testServiceOptsWithDB(t) depositCache, err := depositcache.New() require.NoError(t, err) - cfg := &config{ - BeaconDB: beaconDB, - StateGen: stategen.New(beaconDB), - ForkChoiceStore: protoarray.New(0, 0, [32]byte{}), - DepositCache: depositCache, - } - service, err := NewService(ctx) + opts = append(opts, WithDepositCache(depositCache)) + service, err := NewService(ctx, opts...) 
require.NoError(t, err) - service.cfg = cfg gs, _ := util.DeterministicGenesisState(t, 32) require.NoError(t, service.saveGenesisData(ctx, gs)) diff --git a/beacon-chain/blockchain/receive_attestation.go b/beacon-chain/blockchain/receive_attestation.go index a015fb11a8..1a2f03057f 100644 --- a/beacon-chain/blockchain/receive_attestation.go +++ b/beacon-chain/blockchain/receive_attestation.go @@ -131,7 +131,13 @@ func (s *Service) processAttestationsRoutine(subscribedToStateEvents chan<- stru continue } s.processAttestations(s.ctx) - if err := s.updateHead(s.ctx, s.getJustifiedBalances()); err != nil { + + balances, err := s.justifiedBalances.get(s.ctx, bytesutil.ToBytes32(s.justifiedCheckpt.Root)) + if err != nil { + log.Errorf("Unable to get justified balances for root %v w/ error %s", s.justifiedCheckpt.Root, err) + continue + } + if err := s.updateHead(s.ctx, balances); err != nil { log.Warnf("Resolving fork due to new attestation: %v", err) } } diff --git a/beacon-chain/blockchain/receive_attestation_test.go b/beacon-chain/blockchain/receive_attestation_test.go index 420a92b355..6193bb06db 100644 --- a/beacon-chain/blockchain/receive_attestation_test.go +++ b/beacon-chain/blockchain/receive_attestation_test.go @@ -9,9 +9,7 @@ import ( "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" "github.com/prysmaticlabs/prysm/beacon-chain/core/transition" testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing" - "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray" "github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations" - "github.com/prysmaticlabs/prysm/beacon-chain/state/stategen" "github.com/prysmaticlabs/prysm/config/params" "github.com/prysmaticlabs/prysm/encoding/bytesutil" ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" @@ -42,12 +40,10 @@ func TestAttestationCheckPtState_FarFutureSlot(t *testing.T) { func TestVerifyLMDFFGConsistent_NotOK(t *testing.T) { ctx := context.Background() - beaconDB := 
testDB.SetupDB(t) + opts := testServiceOptsWithDB(t) - cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})} - service, err := NewService(ctx) + service, err := NewService(ctx, opts...) require.NoError(t, err) - service.cfg = cfg b32 := util.NewBeaconBlock() b32.Block.Slot = 32 @@ -71,12 +67,10 @@ func TestVerifyLMDFFGConsistent_NotOK(t *testing.T) { func TestVerifyLMDFFGConsistent_OK(t *testing.T) { ctx := context.Background() - beaconDB := testDB.SetupDB(t) - cfg := &config{BeaconDB: beaconDB, ForkChoiceStore: protoarray.New(0, 0, [32]byte{})} - service, err := NewService(ctx) + opts := testServiceOptsWithDB(t) + service, err := NewService(ctx, opts...) require.NoError(t, err) - service.cfg = cfg b32 := util.NewBeaconBlock() b32.Block.Slot = 32 @@ -101,18 +95,12 @@ func TestVerifyLMDFFGConsistent_OK(t *testing.T) { func TestProcessAttestations_Ok(t *testing.T) { hook := logTest.NewGlobal() ctx := context.Background() - beaconDB := testDB.SetupDB(t) + opts := testServiceOptsWithDB(t) + opts = append(opts, WithAttestationPool(attestations.NewPool())) - cfg := &config{ - BeaconDB: beaconDB, - ForkChoiceStore: protoarray.New(0, 0, [32]byte{}), - StateGen: stategen.New(beaconDB), - AttPool: attestations.NewPool(), - } - service, err := NewService(ctx) + service, err := NewService(ctx, opts...) 
require.NoError(t, err) service.genesisTime = prysmTime.Now().Add(-1 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second) - service.cfg = cfg genesisState, pks := util.DeterministicGenesisState(t, 64) require.NoError(t, genesisState.SetGenesisTime(uint64(prysmTime.Now().Unix())-params.BeaconConfig().SecondsPerSlot)) require.NoError(t, service.saveGenesisData(ctx, genesisState)) diff --git a/beacon-chain/blockchain/receive_block_test.go b/beacon-chain/blockchain/receive_block_test.go index 5527d6cda6..b79f07158a 100644 --- a/beacon-chain/blockchain/receive_block_test.go +++ b/beacon-chain/blockchain/receive_block_test.go @@ -124,21 +124,16 @@ func TestService_ReceiveBlock(t *testing.T) { genesisBlockRoot := bytesutil.ToBytes32(nil) require.NoError(t, beaconDB.SaveState(ctx, genesis, genesisBlockRoot)) - cfg := &config{ - BeaconDB: beaconDB, - ForkChoiceStore: protoarray.New( - 0, // justifiedEpoch - 0, // finalizedEpoch - genesisBlockRoot, - ), - AttPool: attestations.NewPool(), - ExitPool: voluntaryexits.NewPool(), - StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: true}, - StateGen: stategen.New(beaconDB), + opts := []Option{ + WithDatabase(beaconDB), + WithForkChoiceStore(protoarray.New(0, 0, genesisBlockRoot)), + WithAttestationPool(attestations.NewPool()), + WithExitPool(voluntaryexits.NewPool()), + WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}), + WithStateGen(stategen.New(beaconDB)), } - s, err := NewService(ctx) + s, err := NewService(ctx, opts...) 
require.NoError(t, err) - s.cfg = cfg require.NoError(t, s.saveGenesisData(ctx, genesis)) gBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx) require.NoError(t, err) @@ -166,21 +161,17 @@ func TestService_ReceiveBlockUpdateHead(t *testing.T) { beaconDB := testDB.SetupDB(t) genesisBlockRoot := bytesutil.ToBytes32(nil) require.NoError(t, beaconDB.SaveState(ctx, genesis, genesisBlockRoot)) - cfg := &config{ - BeaconDB: beaconDB, - ForkChoiceStore: protoarray.New( - 0, // justifiedEpoch - 0, // finalizedEpoch - genesisBlockRoot, - ), - AttPool: attestations.NewPool(), - ExitPool: voluntaryexits.NewPool(), - StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: true}, - StateGen: stategen.New(beaconDB), + opts := []Option{ + WithDatabase(beaconDB), + WithForkChoiceStore(protoarray.New(0, 0, genesisBlockRoot)), + WithAttestationPool(attestations.NewPool()), + WithExitPool(voluntaryexits.NewPool()), + WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}), + WithStateGen(stategen.New(beaconDB)), } - s, err := NewService(ctx) + + s, err := NewService(ctx, opts...) require.NoError(t, err) - s.cfg = cfg require.NoError(t, s.saveGenesisData(ctx, genesis)) gBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx) require.NoError(t, err) @@ -250,19 +241,14 @@ func TestService_ReceiveBlockBatch(t *testing.T) { beaconDB := testDB.SetupDB(t) genesisBlockRoot, err := genesis.HashTreeRoot(ctx) require.NoError(t, err) - cfg := &config{ - BeaconDB: beaconDB, - ForkChoiceStore: protoarray.New( - 0, // justifiedEpoch - 0, // finalizedEpoch - genesisBlockRoot, - ), - StateNotifier: &blockchainTesting.MockStateNotifier{RecordEvents: true}, - StateGen: stategen.New(beaconDB), + opts := []Option{ + WithDatabase(beaconDB), + WithForkChoiceStore(protoarray.New(0, 0, genesisBlockRoot)), + WithStateNotifier(&blockchainTesting.MockStateNotifier{RecordEvents: true}), + WithStateGen(stategen.New(beaconDB)), } - s, err := NewService(ctx) + s, err := NewService(ctx, opts...) 
require.NoError(t, err) - s.cfg = cfg err = s.saveGenesisData(ctx, genesis) require.NoError(t, err) gBlk, err := s.cfg.BeaconDB.GenesisBlock(ctx) @@ -287,9 +273,10 @@ func TestService_ReceiveBlockBatch(t *testing.T) { } func TestService_HasInitSyncBlock(t *testing.T) { - s, err := NewService(context.Background()) + opts := testServiceOptsNoDB() + opts = append(opts, WithStateNotifier(&blockchainTesting.MockStateNotifier{})) + s, err := NewService(context.Background(), opts...) require.NoError(t, err) - s.cfg = &config{StateNotifier: &blockchainTesting.MockStateNotifier{}} r := [32]byte{'a'} if s.HasInitSyncBlock(r) { t.Error("Should not have block") @@ -301,11 +288,10 @@ func TestService_HasInitSyncBlock(t *testing.T) { } func TestCheckSaveHotStateDB_Enabling(t *testing.T) { - beaconDB := testDB.SetupDB(t) + opts := testServiceOptsWithDB(t) hook := logTest.NewGlobal() - s, err := NewService(context.Background()) + s, err := NewService(context.Background(), opts...) require.NoError(t, err) - s.cfg = &config{StateGen: stategen.New(beaconDB)} st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB)) s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second) s.finalizedCheckpt = ðpb.Checkpoint{} @@ -315,11 +301,10 @@ func TestCheckSaveHotStateDB_Enabling(t *testing.T) { } func TestCheckSaveHotStateDB_Disabling(t *testing.T) { - beaconDB := testDB.SetupDB(t) hook := logTest.NewGlobal() - s, err := NewService(context.Background()) + opts := testServiceOptsWithDB(t) + s, err := NewService(context.Background(), opts...) 
require.NoError(t, err) - s.cfg = &config{StateGen: stategen.New(beaconDB)} s.finalizedCheckpt = ðpb.Checkpoint{} require.NoError(t, s.checkSaveHotStateDB(context.Background())) s.genesisTime = time.Now() @@ -329,11 +314,10 @@ func TestCheckSaveHotStateDB_Disabling(t *testing.T) { } func TestCheckSaveHotStateDB_Overflow(t *testing.T) { - beaconDB := testDB.SetupDB(t) hook := logTest.NewGlobal() - s, err := NewService(context.Background()) + opts := testServiceOptsWithDB(t) + s, err := NewService(context.Background(), opts...) require.NoError(t, err) - s.cfg = &config{StateGen: stategen.New(beaconDB)} s.finalizedCheckpt = ðpb.Checkpoint{Epoch: 10000000} s.genesisTime = time.Now() diff --git a/beacon-chain/blockchain/service.go b/beacon-chain/blockchain/service.go index b59303262a..61f7c586b0 100644 --- a/beacon-chain/blockchain/service.go +++ b/beacon-chain/blockchain/service.go @@ -62,9 +62,9 @@ type Service struct { checkpointStateCache *cache.CheckpointStateCache initSyncBlocks map[[32]byte]block.SignedBeaconBlock initSyncBlocksLock sync.RWMutex - justifiedBalances []uint64 - justifiedBalancesLock sync.RWMutex - wsVerifier *WeakSubjectivityVerifier + //justifiedBalances []uint64 + justifiedBalances *stateBalanceCache + wsVerifier *WeakSubjectivityVerifier } // config options for the service. 
@@ -97,7 +97,6 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) { boundaryRoots: [][32]byte{}, checkpointStateCache: cache.NewCheckpointStateCache(), initSyncBlocks: make(map[[32]byte]block.SignedBeaconBlock), - justifiedBalances: make([]uint64, 0), cfg: &config{}, } for _, opt := range opts { @@ -106,6 +105,12 @@ func NewService(ctx context.Context, opts ...Option) (*Service, error) { } } var err error + if srv.justifiedBalances == nil { + srv.justifiedBalances, err = newStateBalanceCache(srv.cfg.StateGen) + if err != nil { + return nil, err + } + } srv.wsVerifier, err = NewWeakSubjectivityVerifier(srv.cfg.WeakSubjectivityCheckpt, srv.cfg.BeaconDB) if err != nil { return nil, err @@ -151,9 +156,6 @@ func (s *Service) Start() { // Resume fork choice. s.justifiedCheckpt = ethpb.CopyCheckpoint(justifiedCheckpoint) - if err := s.cacheJustifiedStateBalances(s.ctx, s.ensureRootNotZeros(bytesutil.ToBytes32(s.justifiedCheckpt.Root))); err != nil { - log.Fatalf("Could not cache justified state balances: %v", err) - } s.prevJustifiedCheckpt = ethpb.CopyCheckpoint(justifiedCheckpoint) s.bestJustifiedCheckpt = ethpb.CopyCheckpoint(justifiedCheckpoint) s.finalizedCheckpt = ethpb.CopyCheckpoint(finalizedCheckpoint) @@ -340,9 +342,6 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon genesisCheckpoint := genesisState.FinalizedCheckpoint() s.justifiedCheckpt = ethpb.CopyCheckpoint(genesisCheckpoint) - if err := s.cacheJustifiedStateBalances(ctx, genesisBlkRoot); err != nil { - return err - } s.prevJustifiedCheckpt = ethpb.CopyCheckpoint(genesisCheckpoint) s.bestJustifiedCheckpt = ethpb.CopyCheckpoint(genesisCheckpoint) s.finalizedCheckpt = ethpb.CopyCheckpoint(genesisCheckpoint) diff --git a/beacon-chain/blockchain/service_test.go b/beacon-chain/blockchain/service_test.go index b855a438af..7d00391508 100644 --- a/beacon-chain/blockchain/service_test.go +++ b/beacon-chain/blockchain/service_test.go @@ -108,25 +108,24 @@ func 
setupBeaconChain(t *testing.T, beaconDB db.Database) *Service { depositCache, err := depositcache.New() require.NoError(t, err) - cfg := &config{ - BeaconBlockBuf: 0, - BeaconDB: beaconDB, - DepositCache: depositCache, - ChainStartFetcher: web3Service, - P2p: &mockBroadcaster{}, - StateNotifier: &mockBeaconNode{}, - AttPool: attestations.NewPool(), - StateGen: stategen.New(beaconDB), - ForkChoiceStore: protoarray.New(0, 0, params.BeaconConfig().ZeroHash), - AttService: attService, + stateGen := stategen.New(beaconDB) + // Save a state in stategen for purposes of testing a service stop / shutdown. + require.NoError(t, stateGen.SaveState(ctx, bytesutil.ToBytes32(bState.FinalizedCheckpoint().Root), bState)) + + opts := []Option{ + WithDatabase(beaconDB), + WithDepositCache(depositCache), + WithChainStartFetcher(web3Service), + WithAttestationPool(attestations.NewPool()), + WithP2PBroadcaster(&mockBroadcaster{}), + WithStateNotifier(&mockBeaconNode{}), + WithForkChoiceStore(protoarray.New(0, 0, params.BeaconConfig().ZeroHash)), + WithAttestationService(attService), + WithStateGen(stateGen), } - // Safe a state in stategen to purposes of testing a service stop / shutdown. - require.NoError(t, cfg.StateGen.SaveState(ctx, bytesutil.ToBytes32(bState.FinalizedCheckpoint().Root), bState)) - - chainService, err := NewService(ctx) + chainService, err := NewService(ctx, opts...)
require.NoError(t, err, "Unable to setup chain service") - chainService.cfg = cfg chainService.genesisTime = time.Unix(1, 0) // non-zero time return chainService diff --git a/beacon-chain/blockchain/state_balance_cache.go b/beacon-chain/blockchain/state_balance_cache.go new file mode 100644 index 0000000000..1bbec57ad0 --- /dev/null +++ b/beacon-chain/blockchain/state_balance_cache.go @@ -0,0 +1,82 @@ +package blockchain + +import ( + "context" + "errors" + "sync" + + "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" + "github.com/prysmaticlabs/prysm/beacon-chain/core/time" + "github.com/prysmaticlabs/prysm/beacon-chain/state" + "github.com/prysmaticlabs/prysm/beacon-chain/state/stategen" +) + +var errNilStateFromStategen = errors.New("justified state can't be nil") + +type stateBalanceCache struct { + sync.Mutex + balances []uint64 + root [32]byte + stateGen stateByRooter +} + +type stateByRooter interface { + StateByRoot(context.Context, [32]byte) (state.BeaconState, error) +} + +// newStateBalanceCache exists to remind us that stateBalanceCache needs a stategen +// to avoid nil pointer bugs when updating the cache in the read path (get()). +func newStateBalanceCache(sg *stategen.State) (*stateBalanceCache, error) { + if sg == nil { + return nil, errors.New("Can't initialize state balance cache without stategen") + } + return &stateBalanceCache{stateGen: sg}, nil +} + +// update is called by get() when the requested root doesn't match +// the previously read value. This cache assumes we only want to cache one +// set of balances for a single root (the current justified root).
+// +// warning: this is not thread-safe on its own, relies on get() for locking +func (c *stateBalanceCache) update(ctx context.Context, justifiedRoot [32]byte) ([]uint64, error) { + stateBalanceCacheMiss.Inc() + justifiedState, err := c.stateGen.StateByRoot(ctx, justifiedRoot) + if err != nil { + return nil, err + } + if justifiedState == nil || justifiedState.IsNil() { + return nil, errNilStateFromStategen + } + epoch := time.CurrentEpoch(justifiedState) + + justifiedBalances := make([]uint64, justifiedState.NumValidators()) + var balanceAccumulator = func(idx int, val state.ReadOnlyValidator) error { + if helpers.IsActiveValidatorUsingTrie(val, epoch) { + justifiedBalances[idx] = val.EffectiveBalance() + } else { + justifiedBalances[idx] = 0 + } + return nil + } + if err := justifiedState.ReadFromEveryValidator(balanceAccumulator); err != nil { + return nil, err + } + + c.balances = justifiedBalances + c.root = justifiedRoot + return c.balances, nil +} + +// get takes an explicit justifiedRoot so it can invalidate the singleton cache key +// when the justified root changes, and takes a context so that the long-running stategen +// read path can connect to the upstream cancellation/timeout chain.
+func (c *stateBalanceCache) get(ctx context.Context, justifiedRoot [32]byte) ([]uint64, error) { + c.Lock() + defer c.Unlock() + if justifiedRoot == c.root { + stateBalanceCacheHit.Inc() + return c.balances, nil + } + + return c.update(ctx, justifiedRoot) +} diff --git a/beacon-chain/blockchain/state_balance_cache_test.go b/beacon-chain/blockchain/state_balance_cache_test.go new file mode 100644 index 0000000000..e41f350e52 --- /dev/null +++ b/beacon-chain/blockchain/state_balance_cache_test.go @@ -0,0 +1,225 @@ +package blockchain + +import ( + "context" + "encoding/binary" + "errors" + "testing" + + types "github.com/prysmaticlabs/eth2-types" + "github.com/prysmaticlabs/prysm/beacon-chain/state" + v2 "github.com/prysmaticlabs/prysm/beacon-chain/state/v2" + "github.com/prysmaticlabs/prysm/encoding/bytesutil" + ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" + "github.com/prysmaticlabs/prysm/testing/require" + "github.com/prysmaticlabs/prysm/time/slots" +) + +type mockStateByRooter struct { + state state.BeaconState + err error +} + +func (m *mockStateByRooter) StateByRoot(context.Context, [32]byte) (state.BeaconState, error) { + return m.state, m.err +} + +type testStateOpt func(*ethpb.BeaconStateAltair) + +func testStateWithValidators(v []*ethpb.Validator) testStateOpt { + return func(a *ethpb.BeaconStateAltair) { + a.Validators = v + } +} + +func testStateWithSlot(slot types.Slot) testStateOpt { + return func(a *ethpb.BeaconStateAltair) { + a.Slot = slot + } } + +func testStateFixture(opts ...testStateOpt) state.BeaconState { + a := &ethpb.BeaconStateAltair{} + for _, o := range opts { + o(a) + } + s, _ := v2.InitializeFromProtoUnsafe(a) + return s +} + +func generateTestValidators(count int, opts ...func(*ethpb.Validator)) []*ethpb.Validator { + vs := make([]*ethpb.Validator, count) + var i uint32 = 0 + for ; i < uint32(count); i++ { + pk := make([]byte, 48) + binary.LittleEndian.PutUint32(pk, i) + v := &ethpb.Validator{PublicKey: pk} + for _, o := range
opts { + o(v) + } + vs[i] = v + } + return vs +} + +func oddValidatorsExpired(currentSlot types.Slot) func(*ethpb.Validator) { + return func(v *ethpb.Validator) { + pki := binary.LittleEndian.Uint64(v.PublicKey) + if pki%2 == 0 { + v.ExitEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) + 1) + } else { + v.ExitEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) - 1) + } + } +} + +func oddValidatorsQueued(currentSlot types.Slot) func(*ethpb.Validator) { + return func(v *ethpb.Validator) { + v.ExitEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) + 1) + pki := binary.LittleEndian.Uint64(v.PublicKey) + if pki%2 == 0 { + v.ActivationEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) - 1) + } else { + v.ActivationEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) + 1) + } + } +} + +func allValidatorsValid(currentSlot types.Slot) func(*ethpb.Validator) { + return func(v *ethpb.Validator) { + v.ActivationEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) - 1) + v.ExitEpoch = types.Epoch(int(slots.ToEpoch(currentSlot)) + 1) + } +} + +func balanceIsKeyTimes2(v *ethpb.Validator) { + pki := binary.LittleEndian.Uint64(v.PublicKey) + v.EffectiveBalance = uint64(pki) * 2 +} + +func testHalfExpiredValidators() ([]*ethpb.Validator, []uint64) { + balances := []uint64{0, 0, 4, 0, 8, 0, 12, 0, 16, 0} + return generateTestValidators(10, + oddValidatorsExpired(types.Slot(99)), + balanceIsKeyTimes2), balances +} + +func testHalfQueuedValidators() ([]*ethpb.Validator, []uint64) { + balances := []uint64{0, 0, 4, 0, 8, 0, 12, 0, 16, 0} + return generateTestValidators(10, + oddValidatorsQueued(types.Slot(99)), + balanceIsKeyTimes2), balances +} + +func testAllValidValidators() ([]*ethpb.Validator, []uint64) { + balances := []uint64{0, 2, 4, 6, 8, 10, 12, 14, 16, 18} + return generateTestValidators(10, + allValidatorsValid(types.Slot(99)), + balanceIsKeyTimes2), balances +} + +func TestStateBalanceCache(t *testing.T) { + type sbcTestCase struct { + err error + root [32]byte + sbc 
*stateBalanceCache + balances []uint64 + name string + } + sentinelCacheMiss := errors.New("Cache missed, as expected!") + sentinelBalances := []uint64{1, 2, 3, 4, 5} + halfExpiredValidators, halfExpiredBalances := testHalfExpiredValidators() + halfQueuedValidators, halfQueuedBalances := testHalfQueuedValidators() + allValidValidators, allValidBalances := testAllValidValidators() + cases := []sbcTestCase{ + { + root: bytesutil.ToBytes32([]byte{'A'}), + balances: sentinelBalances, + sbc: &stateBalanceCache{ + stateGen: &mockStateByRooter{ + err: sentinelCacheMiss, + }, + root: bytesutil.ToBytes32([]byte{'A'}), + balances: sentinelBalances, + }, + name: "cache hit", + }, + // this works by using a staterooter that returns a known error + // so really we're testing the miss by making sure stategen got called + // this also tells us stategen errors are propagated + { + sbc: &stateBalanceCache{ + stateGen: &mockStateByRooter{ + //state: generateTestValidators(1, testWithBadEpoch), + err: sentinelCacheMiss, + }, + root: bytesutil.ToBytes32([]byte{'B'}), + }, + err: sentinelCacheMiss, + root: bytesutil.ToBytes32([]byte{'A'}), + name: "cache miss", + }, + { + sbc: &stateBalanceCache{ + stateGen: &mockStateByRooter{}, + root: bytesutil.ToBytes32([]byte{'B'}), + }, + err: errNilStateFromStategen, + root: bytesutil.ToBytes32([]byte{'A'}), + name: "error for nil state upon cache miss", + }, + { + sbc: &stateBalanceCache{ + stateGen: &mockStateByRooter{ + state: testStateFixture( + testStateWithSlot(99), + testStateWithValidators(halfExpiredValidators)), + }, + }, + balances: halfExpiredBalances, + root: bytesutil.ToBytes32([]byte{'A'}), + name: "test filtering by exit epoch", + }, + { + sbc: &stateBalanceCache{ + stateGen: &mockStateByRooter{ + state: testStateFixture( + testStateWithSlot(99), + testStateWithValidators(halfQueuedValidators)), + }, + }, + balances: halfQueuedBalances, + root: bytesutil.ToBytes32([]byte{'A'}), + name: "test filtering by activation epoch", + }, + 
{ + sbc: &stateBalanceCache{ + stateGen: &mockStateByRooter{ + state: testStateFixture( + testStateWithSlot(99), + testStateWithValidators(allValidValidators)), + }, + }, + balances: allValidBalances, + root: bytesutil.ToBytes32([]byte{'A'}), + name: "happy path", + }, + } + ctx := context.Background() + for _, c := range cases { + t.Run(c.name, func(t *testing.T) { + cache := c.sbc + cacheRootStart := cache.root + b, err := cache.get(ctx, c.root) + require.ErrorIs(t, err, c.err) + require.DeepEqual(t, c.balances, b) + if c.err != nil { + // if there was an error somewhere, the root should not have changed (unless it already matched) + require.Equal(t, cacheRootStart, cache.root) + } else { + // when successful, the cache should always end with a root matching the request + require.Equal(t, c.root, cache.root) + } + }) + } +} diff --git a/testing/assertions/assertions.go b/testing/assertions/assertions.go index fa867dc85f..e2ec9246f3 100644 --- a/testing/assertions/assertions.go +++ b/testing/assertions/assertions.go @@ -1,6 +1,7 @@ package assertions import ( + "errors" "fmt" "path/filepath" "reflect" @@ -86,6 +87,16 @@ func NoError(loggerFn assertionLoggerFn, err error, msg ...interface{}) { } } +// ErrorIs uses Errors.Is to recursively unwrap err looking for target in the chain. +// If any error in the chain matches target, the assertion will pass. +func ErrorIs(loggerFn assertionLoggerFn, err, target error, msg ...interface{}) { + if !errors.Is(err, target) { + errMsg := parseMsg(fmt.Sprintf("error %s not in chain", target), msg...) + _, file, line, _ := runtime.Caller(2) + loggerFn("%s:%d %s: %v", filepath.Base(file), line, errMsg, err) + } +} + // ErrorContains asserts that actual error contains wanted message. 
func ErrorContains(loggerFn assertionLoggerFn, want string, err error, msg ...interface{}) {
	if err == nil || !strings.Contains(err.Error(), want) {
diff --git a/testing/require/requires.go b/testing/require/requires.go
index 365405d3ec..6dea3cdc16 100644
--- a/testing/require/requires.go
+++ b/testing/require/requires.go
@@ -71,3 +71,9 @@ func LogsDoNotContain(tb assertions.AssertionTestingTB, hook *test.Hook, want st
 func NotEmpty(tb assertions.AssertionTestingTB, obj interface{}, msg ...interface{}) {
 	assertions.NotEmpty(tb.Fatalf, obj, msg...)
 }
+
+// ErrorIs uses errors.Is to recursively unwrap err looking for target in the chain.
+// If any error in the chain matches target, the assertion will pass.
+func ErrorIs(tb assertions.AssertionTestingTB, err, target error, msg ...interface{}) {
+	// Spread the variadic slice; passing `msg` bare would wrap the whole
+	// slice as a single argument (cf. NotEmpty above, which uses msg...).
+	assertions.ErrorIs(tb.Fatalf, err, target, msg...)
+}

From 788338a0043651da40283dec7e31585668028dd1 Mon Sep 17 00:00:00 2001
From: Potuz
Date: Sat, 20 Nov 2021 11:14:07 -0300
Subject: [PATCH 12/45] Stop packing deposits early if we reach max allowed
 (#9806)
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

* Stop packing deposits early if we reach max allowed

* Add logs to proposals without deposits

* Update beacon-chain/rpc/prysm/v1alpha1/validator/proposer_deposits.go

* Update beacon-chain/rpc/prysm/v1alpha1/validator/proposer_deposits.go

* Update beacon-chain/rpc/prysm/v1alpha1/validator/proposer_deposits.go

* reinsert debug log

Co-authored-by: Radosław Kapka
Co-authored-by: terence tsao
Co-authored-by: Nishant Das
---
 .../v1alpha1/validator/proposer_deposits.go | 20 ++++++++++++-------
 1 file changed, 13 insertions(+), 7 deletions(-)

diff --git a/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_deposits.go b/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_deposits.go
index 9614f376c2..e1012cbb07 100644
--- a/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_deposits.go
+++
b/beacon-chain/rpc/prysm/v1alpha1/validator/proposer_deposits.go @@ -94,7 +94,12 @@ func (vs *Server) deposits( ctx, span := trace.StartSpan(ctx, "ProposerServer.deposits") defer span.End() - if vs.MockEth1Votes || !vs.Eth1InfoFetcher.IsConnectedToETH1() { + if vs.MockEth1Votes { + return []*ethpb.Deposit{}, nil + } + + if !vs.Eth1InfoFetcher.IsConnectedToETH1() { + log.Warn("not connected to eth1 node, skip pending deposit insertion") return []*ethpb.Deposit{}, nil } // Need to fetch if the deposits up to the state's latest eth1 data matches @@ -112,6 +117,7 @@ func (vs *Server) deposits( // If there are no pending deposits, exit early. allPendingContainers := vs.PendingDepositsFetcher.PendingContainers(ctx, canonicalEth1DataHeight) if len(allPendingContainers) == 0 { + log.Debug("no pending deposits for inclusion in block") return []*ethpb.Deposit{}, nil } @@ -127,21 +133,21 @@ func (vs *Server) deposits( if uint64(dep.Index) >= beaconState.Eth1DepositIndex() && uint64(dep.Index) < canonicalEth1Data.DepositCount { pendingDeps = append(pendingDeps, dep) } + // Don't try to pack more than the max allowed in a block + if uint64(len(pendingDeps)) == params.BeaconConfig().MaxDeposits { + break + } } for i := range pendingDeps { - // Don't construct merkle proof if the number of deposits is more than max allowed in block. - if uint64(i) == params.BeaconConfig().MaxDeposits { - break - } pendingDeps[i].Deposit, err = constructMerkleProof(depositTrie, int(pendingDeps[i].Index), pendingDeps[i].Deposit) if err != nil { return nil, err } } - // Limit the return of pending deposits to not be more than max deposits allowed in block. 
+ var pendingDeposits []*ethpb.Deposit - for i := uint64(0); i < uint64(len(pendingDeps)) && i < params.BeaconConfig().MaxDeposits; i++ { + for i := uint64(0); i < uint64(len(pendingDeps)); i++ { pendingDeposits = append(pendingDeposits, pendingDeps[i].Deposit) } return pendingDeposits, nil From 838b19e9857bfc3b806bc0ac6edd928672b6efb8 Mon Sep 17 00:00:00 2001 From: terence tsao Date: Mon, 22 Nov 2021 09:37:55 -0800 Subject: [PATCH 13/45] Add getters and setters for beacon state v3 (part 1) (#9915) --- beacon-chain/state/v3/BUILD.bazel | 46 ++- beacon-chain/state/v3/deprecated_getters.go | 16 + .../state/v3/deprecated_getters_test.go | 19 + beacon-chain/state/v3/deprecated_setters.go | 31 ++ .../state/v3/deprecated_setters_test.go | 27 ++ beacon-chain/state/v3/getters_block.go | 99 ++++++ beacon-chain/state/v3/getters_block_test.go | 60 ++++ beacon-chain/state/v3/getters_checkpoint.go | 160 +++++++++ beacon-chain/state/v3/getters_eth1.go | 91 +++++ beacon-chain/state/v3/getters_misc.go | 211 +++++++++++ .../state/v3/getters_participation.go | 53 +++ beacon-chain/state/v3/getters_randao.go | 85 +++++ beacon-chain/state/v3/getters_state.go | 127 +++++++ .../state/v3/getters_sync_committee.go | 69 ++++ beacon-chain/state/v3/getters_test.go | 88 +++++ beacon-chain/state/v3/getters_validator.go | 328 ++++++++++++++++++ .../state/v3/getters_validator_test.go | 20 ++ beacon-chain/state/v3/setters_block.go | 68 ++++ beacon-chain/state/v3/setters_checkpoint.go | 58 ++++ beacon-chain/state/v3/setters_eth1.go | 74 ++++ beacon-chain/state/v3/setters_misc.go | 186 ++++++++++ .../state/v3/setters_participation.go | 89 +++++ beacon-chain/state/v3/setters_randao.go | 53 +++ beacon-chain/state/v3/setters_state.go | 41 +++ .../state/v3/setters_sync_committee.go | 31 ++ beacon-chain/state/v3/setters_validator.go | 265 ++++++++++++++ beacon-chain/state/v3/state_trie.go | 71 ++++ beacon-chain/state/v3/types.go | 29 +- 28 files changed, 2492 insertions(+), 3 deletions(-) create mode 
100644 beacon-chain/state/v3/deprecated_getters.go create mode 100644 beacon-chain/state/v3/deprecated_getters_test.go create mode 100644 beacon-chain/state/v3/deprecated_setters.go create mode 100644 beacon-chain/state/v3/deprecated_setters_test.go create mode 100644 beacon-chain/state/v3/getters_block.go create mode 100644 beacon-chain/state/v3/getters_block_test.go create mode 100644 beacon-chain/state/v3/getters_checkpoint.go create mode 100644 beacon-chain/state/v3/getters_eth1.go create mode 100644 beacon-chain/state/v3/getters_misc.go create mode 100644 beacon-chain/state/v3/getters_participation.go create mode 100644 beacon-chain/state/v3/getters_randao.go create mode 100644 beacon-chain/state/v3/getters_state.go create mode 100644 beacon-chain/state/v3/getters_sync_committee.go create mode 100644 beacon-chain/state/v3/getters_test.go create mode 100644 beacon-chain/state/v3/getters_validator.go create mode 100644 beacon-chain/state/v3/getters_validator_test.go create mode 100644 beacon-chain/state/v3/setters_block.go create mode 100644 beacon-chain/state/v3/setters_checkpoint.go create mode 100644 beacon-chain/state/v3/setters_eth1.go create mode 100644 beacon-chain/state/v3/setters_misc.go create mode 100644 beacon-chain/state/v3/setters_participation.go create mode 100644 beacon-chain/state/v3/setters_randao.go create mode 100644 beacon-chain/state/v3/setters_state.go create mode 100644 beacon-chain/state/v3/setters_sync_committee.go create mode 100644 beacon-chain/state/v3/setters_validator.go create mode 100644 beacon-chain/state/v3/state_trie.go diff --git a/beacon-chain/state/v3/BUILD.bazel b/beacon-chain/state/v3/BUILD.bazel index f53886ab40..52301778fe 100644 --- a/beacon-chain/state/v3/BUILD.bazel +++ b/beacon-chain/state/v3/BUILD.bazel @@ -3,32 +3,74 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test") go_library( name = "go_default_library", srcs = [ + "deprecated_getters.go", + "deprecated_setters.go", "field_root_eth1.go", 
"field_root_validator.go", "field_root_vector.go", "field_roots.go", + "getters_block.go", + "getters_checkpoint.go", + "getters_eth1.go", + "getters_misc.go", + "getters_participation.go", + "getters_randao.go", + "getters_state.go", + "getters_sync_committee.go", + "getters_validator.go", + "setters_block.go", + "setters_checkpoint.go", + "setters_eth1.go", + "setters_misc.go", + "setters_participation.go", + "setters_randao.go", + "setters_state.go", + "setters_sync_committee.go", + "setters_validator.go", + "state_trie.go", "types.go", ], importpath = "github.com/prysmaticlabs/prysm/beacon-chain/state/v3", visibility = ["//beacon-chain:__pkg__"], deps = [ + "//beacon-chain/state:go_default_library", "//beacon-chain/state/fieldtrie:go_default_library", "//beacon-chain/state/stateutil:go_default_library", "//beacon-chain/state/types:go_default_library", + "//beacon-chain/state/v1:go_default_library", "//config/features:go_default_library", "//config/params:go_default_library", "//crypto/hash:go_default_library", "//encoding/bytesutil:go_default_library", "//encoding/ssz:go_default_library", "//proto/prysm/v1alpha1:go_default_library", + "//runtime/version:go_default_library", "@com_github_dgraph_io_ristretto//:go_default_library", "@com_github_pkg_errors//:go_default_library", + "@com_github_prometheus_client_golang//prometheus:go_default_library", + "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library", + "@com_github_prysmaticlabs_eth2_types//:go_default_library", + "@com_github_prysmaticlabs_go_bitfield//:go_default_library", + "@org_golang_google_protobuf//proto:go_default_library", ], ) go_test( name = "go_default_test", - srcs = ["field_root_test.go"], + srcs = [ + "deprecated_getters_test.go", + "deprecated_setters_test.go", + "field_root_test.go", + "getters_block_test.go", + "getters_test.go", + "getters_validator_test.go", + ], embed = [":go_default_library"], - deps = ["//testing/assert:go_default_library"], + deps = [ + 
"//beacon-chain/state/v1:go_default_library",
+        "//encoding/bytesutil:go_default_library",
+        "//proto/prysm/v1alpha1:go_default_library",
+        "//testing/assert:go_default_library",
+        "//testing/require:go_default_library",
+    ],
 )
diff --git a/beacon-chain/state/v3/deprecated_getters.go b/beacon-chain/state/v3/deprecated_getters.go
new file mode 100644
index 0000000000..a6b7cea7b6
--- /dev/null
+++ b/beacon-chain/state/v3/deprecated_getters.go
@@ -0,0 +1,16 @@
+package v3
+
+import (
+	"github.com/pkg/errors"
+	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
+)
+
+// PreviousEpochAttestations is not supported for Merge beacon state.
+func (b *BeaconState) PreviousEpochAttestations() ([]*ethpb.PendingAttestation, error) {
+	return nil, errors.New("PreviousEpochAttestations is not supported for version Merge beacon state")
+}
+
+// CurrentEpochAttestations is not supported for Merge beacon state.
+func (b *BeaconState) CurrentEpochAttestations() ([]*ethpb.PendingAttestation, error) {
+	return nil, errors.New("CurrentEpochAttestations is not supported for version Merge beacon state")
+}
diff --git a/beacon-chain/state/v3/deprecated_getters_test.go b/beacon-chain/state/v3/deprecated_getters_test.go
new file mode 100644
index 0000000000..3488b284e5
--- /dev/null
+++ b/beacon-chain/state/v3/deprecated_getters_test.go
@@ -0,0 +1,19 @@
+package v3
+
+import (
+	"testing"
+
+	"github.com/prysmaticlabs/prysm/testing/require"
+)
+
+func TestBeaconState_CurrentEpochAttestations(t *testing.T) {
+	s := &BeaconState{}
+	_, err := s.CurrentEpochAttestations()
+	require.ErrorContains(t, "CurrentEpochAttestations is not supported for version Merge beacon state", err)
+}
+
+func TestBeaconState_PreviousEpochAttestations(t *testing.T) {
+	s := &BeaconState{}
+	_, err := s.PreviousEpochAttestations()
+	require.ErrorContains(t, "PreviousEpochAttestations is not supported for version Merge beacon state", err)
+}
diff --git a/beacon-chain/state/v3/deprecated_setters.go
b/beacon-chain/state/v3/deprecated_setters.go
new file mode 100644
index 0000000000..e99b23993d
--- /dev/null
+++ b/beacon-chain/state/v3/deprecated_setters.go
@@ -0,0 +1,31 @@
+package v3
+
+import (
+	"github.com/pkg/errors"
+	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
+)
+
+// SetPreviousEpochAttestations is not supported for Merge beacon state.
+func (b *BeaconState) SetPreviousEpochAttestations(val []*ethpb.PendingAttestation) error {
+	return errors.New("SetPreviousEpochAttestations is not supported for version Merge beacon state")
+}
+
+// SetCurrentEpochAttestations is not supported for Merge beacon state.
+func (b *BeaconState) SetCurrentEpochAttestations(val []*ethpb.PendingAttestation) error {
+	return errors.New("SetCurrentEpochAttestations is not supported for version Merge beacon state")
+}
+
+// AppendCurrentEpochAttestations is not supported for Merge beacon state.
+func (b *BeaconState) AppendCurrentEpochAttestations(val *ethpb.PendingAttestation) error {
+	return errors.New("AppendCurrentEpochAttestations is not supported for version Merge beacon state")
+}
+
+// AppendPreviousEpochAttestations is not supported for Merge beacon state.
+func (b *BeaconState) AppendPreviousEpochAttestations(val *ethpb.PendingAttestation) error {
+	return errors.New("AppendPreviousEpochAttestations is not supported for version Merge beacon state")
+}
+
+// RotateAttestations is not supported for Merge beacon state.
+func (b *BeaconState) RotateAttestations() error { + return errors.New("RotateAttestations is not supported for version Merge beacon state") +} diff --git a/beacon-chain/state/v3/deprecated_setters_test.go b/beacon-chain/state/v3/deprecated_setters_test.go new file mode 100644 index 0000000000..4fbde1097d --- /dev/null +++ b/beacon-chain/state/v3/deprecated_setters_test.go @@ -0,0 +1,27 @@ +package v3 + +import ( + "testing" + + "github.com/prysmaticlabs/prysm/testing/require" +) + +func TestBeaconState_AppendCurrentEpochAttestations(t *testing.T) { + s := &BeaconState{} + require.ErrorContains(t, "AppendCurrentEpochAttestations is not supported for version Merge beacon state", s.AppendCurrentEpochAttestations(nil)) +} + +func TestBeaconState_AppendPreviousEpochAttestations(t *testing.T) { + s := &BeaconState{} + require.ErrorContains(t, "AppendPreviousEpochAttestations is not supported for version Merge beacon state", s.AppendPreviousEpochAttestations(nil)) +} + +func TestBeaconState_SetCurrentEpochAttestations(t *testing.T) { + s := &BeaconState{} + require.ErrorContains(t, "SetCurrentEpochAttestations is not supported for version Merge beacon state", s.SetCurrentEpochAttestations(nil)) +} + +func TestBeaconState_SetPreviousEpochAttestations(t *testing.T) { + s := &BeaconState{} + require.ErrorContains(t, "SetPreviousEpochAttestations is not supported for version Merge beacon state", s.SetPreviousEpochAttestations(nil)) +} diff --git a/beacon-chain/state/v3/getters_block.go b/beacon-chain/state/v3/getters_block.go new file mode 100644 index 0000000000..b9f4bb5b3e --- /dev/null +++ b/beacon-chain/state/v3/getters_block.go @@ -0,0 +1,99 @@ +package v3 + +import ( + "github.com/prysmaticlabs/prysm/encoding/bytesutil" + ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" +) + +// LatestBlockHeader stored within the beacon state. 
+func (b *BeaconState) LatestBlockHeader() *ethpb.BeaconBlockHeader { + if !b.hasInnerState() { + return nil + } + if b.state.LatestBlockHeader == nil { + return nil + } + + b.lock.RLock() + defer b.lock.RUnlock() + + return b.latestBlockHeader() +} + +// latestBlockHeader stored within the beacon state. +// This assumes that a lock is already held on BeaconState. +func (b *BeaconState) latestBlockHeader() *ethpb.BeaconBlockHeader { + if !b.hasInnerState() { + return nil + } + if b.state.LatestBlockHeader == nil { + return nil + } + + hdr := ðpb.BeaconBlockHeader{ + Slot: b.state.LatestBlockHeader.Slot, + ProposerIndex: b.state.LatestBlockHeader.ProposerIndex, + } + + parentRoot := make([]byte, len(b.state.LatestBlockHeader.ParentRoot)) + bodyRoot := make([]byte, len(b.state.LatestBlockHeader.BodyRoot)) + stateRoot := make([]byte, len(b.state.LatestBlockHeader.StateRoot)) + + copy(parentRoot, b.state.LatestBlockHeader.ParentRoot) + copy(bodyRoot, b.state.LatestBlockHeader.BodyRoot) + copy(stateRoot, b.state.LatestBlockHeader.StateRoot) + hdr.ParentRoot = parentRoot + hdr.BodyRoot = bodyRoot + hdr.StateRoot = stateRoot + return hdr +} + +// BlockRoots kept track of in the beacon state. +func (b *BeaconState) BlockRoots() [][]byte { + if !b.hasInnerState() { + return nil + } + if b.state.BlockRoots == nil { + return nil + } + + b.lock.RLock() + defer b.lock.RUnlock() + + return b.blockRoots() +} + +// blockRoots kept track of in the beacon state. +// This assumes that a lock is already held on BeaconState. +func (b *BeaconState) blockRoots() [][]byte { + if !b.hasInnerState() { + return nil + } + return bytesutil.SafeCopy2dBytes(b.state.BlockRoots) +} + +// BlockRootAtIndex retrieves a specific block root based on an +// input index value. 
+func (b *BeaconState) BlockRootAtIndex(idx uint64) ([]byte, error) { + if !b.hasInnerState() { + return nil, ErrNilInnerState + } + if b.state.BlockRoots == nil { + return nil, nil + } + + b.lock.RLock() + defer b.lock.RUnlock() + + return b.blockRootAtIndex(idx) +} + +// blockRootAtIndex retrieves a specific block root based on an +// input index value. +// This assumes that a lock is already held on BeaconState. +func (b *BeaconState) blockRootAtIndex(idx uint64) ([]byte, error) { + if !b.hasInnerState() { + return nil, ErrNilInnerState + } + return bytesutil.SafeCopyRootAtIndex(b.state.BlockRoots, idx) +} diff --git a/beacon-chain/state/v3/getters_block_test.go b/beacon-chain/state/v3/getters_block_test.go new file mode 100644 index 0000000000..3bdccc52b8 --- /dev/null +++ b/beacon-chain/state/v3/getters_block_test.go @@ -0,0 +1,60 @@ +package v3 + +import ( + "testing" + + "github.com/prysmaticlabs/prysm/encoding/bytesutil" + ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" + v1alpha1 "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" + "github.com/prysmaticlabs/prysm/testing/require" +) + +func TestBeaconState_LatestBlockHeader(t *testing.T) { + s, err := InitializeFromProto(ðpb.BeaconStateMerge{}) + require.NoError(t, err) + got := s.LatestBlockHeader() + require.DeepEqual(t, (*v1alpha1.BeaconBlockHeader)(nil), got) + + want := &v1alpha1.BeaconBlockHeader{Slot: 100} + s, err = InitializeFromProto(ðpb.BeaconStateMerge{LatestBlockHeader: want}) + require.NoError(t, err) + got = s.LatestBlockHeader() + require.DeepEqual(t, want, got) + + // Test copy does not mutate. 
+ got.Slot = 101 + require.DeepNotEqual(t, want, got) +} + +func TestBeaconState_BlockRoots(t *testing.T) { + s, err := InitializeFromProto(ðpb.BeaconStateMerge{}) + require.NoError(t, err) + got := s.BlockRoots() + require.DeepEqual(t, ([][]byte)(nil), got) + + want := [][]byte{{'a'}} + s, err = InitializeFromProto(ðpb.BeaconStateMerge{BlockRoots: want}) + require.NoError(t, err) + got = s.BlockRoots() + require.DeepEqual(t, want, got) + + // Test copy does not mutate. + got[0][0] = 'b' + require.DeepNotEqual(t, want, got) +} + +func TestBeaconState_BlockRootAtIndex(t *testing.T) { + s, err := InitializeFromProto(ðpb.BeaconStateMerge{}) + require.NoError(t, err) + got, err := s.BlockRootAtIndex(0) + require.NoError(t, err) + require.DeepEqual(t, ([]byte)(nil), got) + + r := [][]byte{{'a'}} + s, err = InitializeFromProto(ðpb.BeaconStateMerge{BlockRoots: r}) + require.NoError(t, err) + got, err = s.BlockRootAtIndex(0) + require.NoError(t, err) + want := bytesutil.PadTo([]byte{'a'}, 32) + require.DeepSSZEqual(t, want, got) +} diff --git a/beacon-chain/state/v3/getters_checkpoint.go b/beacon-chain/state/v3/getters_checkpoint.go new file mode 100644 index 0000000000..f6b6eab16c --- /dev/null +++ b/beacon-chain/state/v3/getters_checkpoint.go @@ -0,0 +1,160 @@ +package v3 + +import ( + "bytes" + + types "github.com/prysmaticlabs/eth2-types" + "github.com/prysmaticlabs/go-bitfield" + ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" +) + +// JustificationBits marking which epochs have been justified in the beacon chain. +func (b *BeaconState) JustificationBits() bitfield.Bitvector4 { + if !b.hasInnerState() { + return nil + } + if b.state.JustificationBits == nil { + return nil + } + + b.lock.RLock() + defer b.lock.RUnlock() + + return b.justificationBits() +} + +// justificationBits marking which epochs have been justified in the beacon chain. +// This assumes that a lock is already held on BeaconState. 
+func (b *BeaconState) justificationBits() bitfield.Bitvector4 { + if !b.hasInnerState() { + return nil + } + if b.state.JustificationBits == nil { + return nil + } + + res := make([]byte, len(b.state.JustificationBits.Bytes())) + copy(res, b.state.JustificationBits.Bytes()) + return res +} + +// PreviousJustifiedCheckpoint denoting an epoch and block root. +func (b *BeaconState) PreviousJustifiedCheckpoint() *ethpb.Checkpoint { + if !b.hasInnerState() { + return nil + } + if b.state.PreviousJustifiedCheckpoint == nil { + return nil + } + + b.lock.RLock() + defer b.lock.RUnlock() + + return b.previousJustifiedCheckpoint() +} + +// previousJustifiedCheckpoint denoting an epoch and block root. +// This assumes that a lock is already held on BeaconState. +func (b *BeaconState) previousJustifiedCheckpoint() *ethpb.Checkpoint { + if !b.hasInnerState() { + return nil + } + + return ethpb.CopyCheckpoint(b.state.PreviousJustifiedCheckpoint) +} + +// CurrentJustifiedCheckpoint denoting an epoch and block root. +func (b *BeaconState) CurrentJustifiedCheckpoint() *ethpb.Checkpoint { + if !b.hasInnerState() { + return nil + } + if b.state.CurrentJustifiedCheckpoint == nil { + return nil + } + + b.lock.RLock() + defer b.lock.RUnlock() + + return b.currentJustifiedCheckpoint() +} + +// currentJustifiedCheckpoint denoting an epoch and block root. +// This assumes that a lock is already held on BeaconState. +func (b *BeaconState) currentJustifiedCheckpoint() *ethpb.Checkpoint { + if !b.hasInnerState() { + return nil + } + + return ethpb.CopyCheckpoint(b.state.CurrentJustifiedCheckpoint) +} + +// MatchCurrentJustifiedCheckpoint returns true if input justified checkpoint matches +// the current justified checkpoint in state. 
+func (b *BeaconState) MatchCurrentJustifiedCheckpoint(c *ethpb.Checkpoint) bool { + if !b.hasInnerState() { + return false + } + if b.state.CurrentJustifiedCheckpoint == nil { + return false + } + + if c.Epoch != b.state.CurrentJustifiedCheckpoint.Epoch { + return false + } + return bytes.Equal(c.Root, b.state.CurrentJustifiedCheckpoint.Root) +} + +// MatchPreviousJustifiedCheckpoint returns true if the input justified checkpoint matches +// the previous justified checkpoint in state. +func (b *BeaconState) MatchPreviousJustifiedCheckpoint(c *ethpb.Checkpoint) bool { + if !b.hasInnerState() { + return false + } + if b.state.PreviousJustifiedCheckpoint == nil { + return false + } + + if c.Epoch != b.state.PreviousJustifiedCheckpoint.Epoch { + return false + } + return bytes.Equal(c.Root, b.state.PreviousJustifiedCheckpoint.Root) +} + +// FinalizedCheckpoint denoting an epoch and block root. +func (b *BeaconState) FinalizedCheckpoint() *ethpb.Checkpoint { + if !b.hasInnerState() { + return nil + } + if b.state.FinalizedCheckpoint == nil { + return nil + } + + b.lock.RLock() + defer b.lock.RUnlock() + + return b.finalizedCheckpoint() +} + +// finalizedCheckpoint denoting an epoch and block root. +// This assumes that a lock is already held on BeaconState. +func (b *BeaconState) finalizedCheckpoint() *ethpb.Checkpoint { + if !b.hasInnerState() { + return nil + } + + return ethpb.CopyCheckpoint(b.state.FinalizedCheckpoint) +} + +// FinalizedCheckpointEpoch returns the epoch value of the finalized checkpoint. 
+func (b *BeaconState) FinalizedCheckpointEpoch() types.Epoch { + if !b.hasInnerState() { + return 0 + } + if b.state.FinalizedCheckpoint == nil { + return 0 + } + b.lock.RLock() + defer b.lock.RUnlock() + + return b.state.FinalizedCheckpoint.Epoch +} diff --git a/beacon-chain/state/v3/getters_eth1.go b/beacon-chain/state/v3/getters_eth1.go new file mode 100644 index 0000000000..04449eb9d9 --- /dev/null +++ b/beacon-chain/state/v3/getters_eth1.go @@ -0,0 +1,91 @@ +package v3 + +import ( + ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" +) + +// Eth1Data corresponding to the proof-of-work chain information stored in the beacon state. +func (b *BeaconState) Eth1Data() *ethpb.Eth1Data { + if !b.hasInnerState() { + return nil + } + if b.state.Eth1Data == nil { + return nil + } + + b.lock.RLock() + defer b.lock.RUnlock() + + return b.eth1Data() +} + +// eth1Data corresponding to the proof-of-work chain information stored in the beacon state. +// This assumes that a lock is already held on BeaconState. +func (b *BeaconState) eth1Data() *ethpb.Eth1Data { + if !b.hasInnerState() { + return nil + } + if b.state.Eth1Data == nil { + return nil + } + + return ethpb.CopyETH1Data(b.state.Eth1Data) +} + +// Eth1DataVotes corresponds to votes from Ethereum on the canonical proof-of-work chain +// data retrieved from eth1. +func (b *BeaconState) Eth1DataVotes() []*ethpb.Eth1Data { + if !b.hasInnerState() { + return nil + } + if b.state.Eth1DataVotes == nil { + return nil + } + + b.lock.RLock() + defer b.lock.RUnlock() + + return b.eth1DataVotes() +} + +// eth1DataVotes corresponds to votes from Ethereum on the canonical proof-of-work chain +// data retrieved from eth1. +// This assumes that a lock is already held on BeaconState. 
+func (b *BeaconState) eth1DataVotes() []*ethpb.Eth1Data { + if !b.hasInnerState() { + return nil + } + if b.state.Eth1DataVotes == nil { + return nil + } + + res := make([]*ethpb.Eth1Data, len(b.state.Eth1DataVotes)) + for i := 0; i < len(res); i++ { + res[i] = ethpb.CopyETH1Data(b.state.Eth1DataVotes[i]) + } + return res +} + +// Eth1DepositIndex corresponds to the index of the deposit made to the +// validator deposit contract at the time of this state's eth1 data. +func (b *BeaconState) Eth1DepositIndex() uint64 { + if !b.hasInnerState() { + return 0 + } + + b.lock.RLock() + defer b.lock.RUnlock() + + return b.eth1DepositIndex() +} + +// eth1DepositIndex corresponds to the index of the deposit made to the +// validator deposit contract at the time of this state's eth1 data. +// This assumes that a lock is already held on BeaconState. +func (b *BeaconState) eth1DepositIndex() uint64 { + if !b.hasInnerState() { + return 0 + } + + return b.state.Eth1DepositIndex +} diff --git a/beacon-chain/state/v3/getters_misc.go b/beacon-chain/state/v3/getters_misc.go new file mode 100644 index 0000000000..211b01ee9e --- /dev/null +++ b/beacon-chain/state/v3/getters_misc.go @@ -0,0 +1,211 @@ +package v3 + +import ( + "time" + + types "github.com/prysmaticlabs/eth2-types" + "github.com/prysmaticlabs/prysm/config/params" + "github.com/prysmaticlabs/prysm/encoding/bytesutil" + ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" + "github.com/prysmaticlabs/prysm/runtime/version" +) + +// GenesisTime of the beacon state as a uint64. +func (b *BeaconState) GenesisTime() uint64 { + if !b.hasInnerState() { + return 0 + } + + b.lock.RLock() + defer b.lock.RUnlock() + + return b.genesisTime() +} + +// genesisTime of the beacon state as a uint64. +// This assumes that a lock is already held on BeaconState. +func (b *BeaconState) genesisTime() uint64 { + if !b.hasInnerState() { + return 0 + } + + return b.state.GenesisTime +} + +// GenesisValidatorRoot of the beacon state. 
+func (b *BeaconState) GenesisValidatorRoot() []byte { + if !b.hasInnerState() { + return nil + } + if b.state.GenesisValidatorsRoot == nil { + return params.BeaconConfig().ZeroHash[:] + } + + b.lock.RLock() + defer b.lock.RUnlock() + + return b.genesisValidatorRoot() +} + +// genesisValidatorRoot of the beacon state. +// This assumes that a lock is already held on BeaconState. +func (b *BeaconState) genesisValidatorRoot() []byte { + if !b.hasInnerState() { + return nil + } + if b.state.GenesisValidatorsRoot == nil { + return params.BeaconConfig().ZeroHash[:] + } + + root := make([]byte, 32) + copy(root, b.state.GenesisValidatorsRoot) + return root +} + +// GenesisUnixTime returns the genesis time as time.Time. +func (b *BeaconState) GenesisUnixTime() time.Time { + if !b.hasInnerState() { + return time.Unix(0, 0) + } + + b.lock.RLock() + defer b.lock.RUnlock() + + return b.genesisUnixTime() +} + +// genesisUnixTime returns the genesis time as time.Time. +// This assumes that a lock is already held on BeaconState. +func (b *BeaconState) genesisUnixTime() time.Time { + if !b.hasInnerState() { + return time.Unix(0, 0) + } + + return time.Unix(int64(b.state.GenesisTime), 0) +} + +// ParentRoot is a convenience method to access state.LatestBlockRoot.ParentRoot. +func (b *BeaconState) ParentRoot() [32]byte { + if !b.hasInnerState() { + return [32]byte{} + } + + b.lock.RLock() + defer b.lock.RUnlock() + + return b.parentRoot() +} + +// parentRoot is a convenience method to access state.LatestBlockRoot.ParentRoot. +// This assumes that a lock is already held on BeaconState. +func (b *BeaconState) parentRoot() [32]byte { + if !b.hasInnerState() { + return [32]byte{} + } + + parentRoot := [32]byte{} + copy(parentRoot[:], b.state.LatestBlockHeader.ParentRoot) + return parentRoot +} + +// Version of the beacon state. This method +// is strictly meant to be used without a lock +// internally. 
+func (b *BeaconState) Version() int {
+	return version.Merge
+}
+
+// Slot of the current beacon chain state.
+func (b *BeaconState) Slot() types.Slot {
+	if !b.hasInnerState() {
+		return 0
+	}
+
+	b.lock.RLock()
+	defer b.lock.RUnlock()
+
+	return b.slot()
+}
+
+// slot of the current beacon chain state.
+// This assumes that a lock is already held on BeaconState.
+func (b *BeaconState) slot() types.Slot {
+	if !b.hasInnerState() {
+		return 0
+	}
+
+	return b.state.Slot
+}
+
+// Fork version of the beacon chain.
+func (b *BeaconState) Fork() *ethpb.Fork {
+	if !b.hasInnerState() {
+		return nil
+	}
+	if b.state.Fork == nil {
+		return nil
+	}
+
+	b.lock.RLock()
+	defer b.lock.RUnlock()
+
+	return b.fork()
+}
+
+// fork version of the beacon chain.
+// This assumes that a lock is already held on BeaconState.
+func (b *BeaconState) fork() *ethpb.Fork {
+	if !b.hasInnerState() {
+		return nil
+	}
+	if b.state.Fork == nil {
+		return nil
+	}
+
+	prevVersion := make([]byte, len(b.state.Fork.PreviousVersion))
+	copy(prevVersion, b.state.Fork.PreviousVersion)
+	currVersion := make([]byte, len(b.state.Fork.CurrentVersion))
+	copy(currVersion, b.state.Fork.CurrentVersion)
+	return &ethpb.Fork{
+		PreviousVersion: prevVersion,
+		CurrentVersion:  currVersion,
+		Epoch:           b.state.Fork.Epoch,
+	}
+}
+
+// HistoricalRoots based on epochs stored in the beacon state.
+func (b *BeaconState) HistoricalRoots() [][]byte {
+	if !b.hasInnerState() {
+		return nil
+	}
+	if b.state.HistoricalRoots == nil {
+		return nil
+	}
+
+	b.lock.RLock()
+	defer b.lock.RUnlock()
+
+	return b.historicalRoots()
+}
+
+// historicalRoots based on epochs stored in the beacon state.
+// This assumes that a lock is already held on BeaconState.
+func (b *BeaconState) historicalRoots() [][]byte {
+	if !b.hasInnerState() {
+		return nil
+	}
+	return bytesutil.SafeCopy2dBytes(b.state.HistoricalRoots)
+}
+
+// balancesLength returns the length of the balances slice.
+// This assumes that a lock is already held on BeaconState. +func (b *BeaconState) balancesLength() int { + if !b.hasInnerState() { + return 0 + } + if b.state.Balances == nil { + return 0 + } + + return len(b.state.Balances) +} diff --git a/beacon-chain/state/v3/getters_participation.go b/beacon-chain/state/v3/getters_participation.go new file mode 100644 index 0000000000..42358b01de --- /dev/null +++ b/beacon-chain/state/v3/getters_participation.go @@ -0,0 +1,53 @@ +package v3 + +// CurrentEpochParticipation corresponding to participation bits on the beacon chain. +func (b *BeaconState) CurrentEpochParticipation() ([]byte, error) { + if !b.hasInnerState() { + return nil, nil + } + if b.state.CurrentEpochParticipation == nil { + return nil, nil + } + + b.lock.RLock() + defer b.lock.RUnlock() + + return b.currentEpochParticipation(), nil +} + +// PreviousEpochParticipation corresponding to participation bits on the beacon chain. +func (b *BeaconState) PreviousEpochParticipation() ([]byte, error) { + if !b.hasInnerState() { + return nil, nil + } + if b.state.PreviousEpochParticipation == nil { + return nil, nil + } + + b.lock.RLock() + defer b.lock.RUnlock() + + return b.previousEpochParticipation(), nil +} + +// currentEpochParticipation corresponding to participation bits on the beacon chain. +// This assumes that a lock is already held on BeaconState. +func (b *BeaconState) currentEpochParticipation() []byte { + if !b.hasInnerState() { + return nil + } + tmp := make([]byte, len(b.state.CurrentEpochParticipation)) + copy(tmp, b.state.CurrentEpochParticipation) + return tmp +} + +// previousEpochParticipation corresponding to participation bits on the beacon chain. +// This assumes that a lock is already held on BeaconState. 
+func (b *BeaconState) previousEpochParticipation() []byte { + if !b.hasInnerState() { + return nil + } + tmp := make([]byte, len(b.state.PreviousEpochParticipation)) + copy(tmp, b.state.PreviousEpochParticipation) + return tmp +} diff --git a/beacon-chain/state/v3/getters_randao.go b/beacon-chain/state/v3/getters_randao.go new file mode 100644 index 0000000000..660e330ab3 --- /dev/null +++ b/beacon-chain/state/v3/getters_randao.go @@ -0,0 +1,85 @@ +package v3 + +import ( + "github.com/prysmaticlabs/prysm/encoding/bytesutil" +) + +// RandaoMixes of block proposers on the beacon chain. +func (b *BeaconState) RandaoMixes() [][]byte { + if !b.hasInnerState() { + return nil + } + if b.state.RandaoMixes == nil { + return nil + } + + b.lock.RLock() + defer b.lock.RUnlock() + + return b.randaoMixes() +} + +// randaoMixes of block proposers on the beacon chain. +// This assumes that a lock is already held on BeaconState. +func (b *BeaconState) randaoMixes() [][]byte { + if !b.hasInnerState() { + return nil + } + + return bytesutil.SafeCopy2dBytes(b.state.RandaoMixes) +} + +// RandaoMixAtIndex retrieves a specific block root based on an +// input index value. +func (b *BeaconState) RandaoMixAtIndex(idx uint64) ([]byte, error) { + if !b.hasInnerState() { + return nil, ErrNilInnerState + } + if b.state.RandaoMixes == nil { + return nil, nil + } + + b.lock.RLock() + defer b.lock.RUnlock() + + return b.randaoMixAtIndex(idx) +} + +// randaoMixAtIndex retrieves a specific block root based on an +// input index value. +// This assumes that a lock is already held on BeaconState. +func (b *BeaconState) randaoMixAtIndex(idx uint64) ([]byte, error) { + if !b.hasInnerState() { + return nil, ErrNilInnerState + } + + return bytesutil.SafeCopyRootAtIndex(b.state.RandaoMixes, idx) +} + +// RandaoMixesLength returns the length of the randao mixes slice. 
+func (b *BeaconState) RandaoMixesLength() int { + if !b.hasInnerState() { + return 0 + } + if b.state.RandaoMixes == nil { + return 0 + } + + b.lock.RLock() + defer b.lock.RUnlock() + + return b.randaoMixesLength() +} + +// randaoMixesLength returns the length of the randao mixes slice. +// This assumes that a lock is already held on BeaconState. +func (b *BeaconState) randaoMixesLength() int { + if !b.hasInnerState() { + return 0 + } + if b.state.RandaoMixes == nil { + return 0 + } + + return len(b.state.RandaoMixes) +} diff --git a/beacon-chain/state/v3/getters_state.go b/beacon-chain/state/v3/getters_state.go new file mode 100644 index 0000000000..9bd446c0b9 --- /dev/null +++ b/beacon-chain/state/v3/getters_state.go @@ -0,0 +1,127 @@ +package v3 + +import ( + "github.com/pkg/errors" + "github.com/prysmaticlabs/prysm/encoding/bytesutil" + ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" +) + +// InnerStateUnsafe returns the pointer value of the underlying +// beacon state proto object, bypassing immutability. Use with care. +func (b *BeaconState) InnerStateUnsafe() interface{} { + if b == nil { + return nil + } + return b.state +} + +// CloneInnerState the beacon state into a protobuf for usage. 
+func (b *BeaconState) CloneInnerState() interface{} {
+	if b == nil || b.state == nil {
+		return nil
+	}
+
+	b.lock.RLock()
+	defer b.lock.RUnlock()
+	return &ethpb.BeaconStateAltair{
+		GenesisTime:                 b.genesisTime(),
+		GenesisValidatorsRoot:       b.genesisValidatorRoot(),
+		Slot:                        b.slot(),
+		Fork:                        b.fork(),
+		LatestBlockHeader:           b.latestBlockHeader(),
+		BlockRoots:                  b.blockRoots(),
+		StateRoots:                  b.stateRoots(),
+		HistoricalRoots:             b.historicalRoots(),
+		Eth1Data:                    b.eth1Data(),
+		Eth1DataVotes:               b.eth1DataVotes(),
+		Eth1DepositIndex:            b.eth1DepositIndex(),
+		Validators:                  b.validators(),
+		Balances:                    b.balances(),
+		RandaoMixes:                 b.randaoMixes(),
+		Slashings:                   b.slashings(),
+		CurrentEpochParticipation:   b.currentEpochParticipation(),
+		PreviousEpochParticipation:  b.previousEpochParticipation(),
+		JustificationBits:           b.justificationBits(),
+		PreviousJustifiedCheckpoint: b.previousJustifiedCheckpoint(),
+		CurrentJustifiedCheckpoint:  b.currentJustifiedCheckpoint(),
+		FinalizedCheckpoint:         b.finalizedCheckpoint(),
+		InactivityScores:            b.inactivityScores(),
+		CurrentSyncCommittee:        b.currentSyncCommittee(),
+		NextSyncCommittee:           b.nextSyncCommittee(),
+	}
+}
+
+// hasInnerState detects if the internal reference to the state data structure
+// is populated correctly. Returns false if nil.
+func (b *BeaconState) hasInnerState() bool {
+	return b != nil && b.state != nil
+}
+
+// StateRoots kept track of in the beacon state.
+func (b *BeaconState) StateRoots() [][]byte {
+	if !b.hasInnerState() {
+		return nil
+	}
+	if b.state.StateRoots == nil {
+		return nil
+	}
+
+	b.lock.RLock()
+	defer b.lock.RUnlock()
+
+	return b.stateRoots()
+}
+
+// StateRoots kept track of in the beacon state.
+// This assumes that a lock is already held on BeaconState.
+func (b *BeaconState) stateRoots() [][]byte {
+	if !b.hasInnerState() {
+		return nil
+	}
+	return bytesutil.SafeCopy2dBytes(b.state.StateRoots)
+}
+
+// StateRootAtIndex retrieves a specific state root based on an
+// input index value.
+func (b *BeaconState) StateRootAtIndex(idx uint64) ([]byte, error) { + if !b.hasInnerState() { + return nil, ErrNilInnerState + } + if b.state.StateRoots == nil { + return nil, nil + } + + b.lock.RLock() + defer b.lock.RUnlock() + + return b.stateRootAtIndex(idx) +} + +// stateRootAtIndex retrieves a specific state root based on an +// input index value. +// This assumes that a lock is already held on BeaconState. +func (b *BeaconState) stateRootAtIndex(idx uint64) ([]byte, error) { + if !b.hasInnerState() { + return nil, ErrNilInnerState + } + return bytesutil.SafeCopyRootAtIndex(b.state.StateRoots, idx) +} + +// MarshalSSZ marshals the underlying beacon state to bytes. +func (b *BeaconState) MarshalSSZ() ([]byte, error) { + if !b.hasInnerState() { + return nil, errors.New("nil beacon state") + } + //TODO: Blocked by https://github.com/ferranbt/fastssz/pull/65 + return []byte{}, nil +} + +// ProtobufBeaconState transforms an input into beacon state hard fork 1 in the form of protobuf. +// Error is returned if the input is not type protobuf beacon state. +func ProtobufBeaconState(s interface{}) (*ethpb.BeaconStateAltair, error) { + pbState, ok := s.(*ethpb.BeaconStateAltair) + if !ok { + return nil, errors.New("input is not type pb.BeaconStateAltair") + } + return pbState, nil +} diff --git a/beacon-chain/state/v3/getters_sync_committee.go b/beacon-chain/state/v3/getters_sync_committee.go new file mode 100644 index 0000000000..38faf4ac19 --- /dev/null +++ b/beacon-chain/state/v3/getters_sync_committee.go @@ -0,0 +1,69 @@ +package v3 + +import ( + "github.com/prysmaticlabs/prysm/encoding/bytesutil" + ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" +) + +// currentSyncCommittee of the current sync committee in beacon chain state. +// This assumes that a lock is already held on BeaconState. 
+func (b *BeaconState) currentSyncCommittee() *ethpb.SyncCommittee { + if !b.hasInnerState() { + return nil + } + + return CopySyncCommittee(b.state.CurrentSyncCommittee) +} + +// nextSyncCommittee of the next sync committee in beacon chain state. +// This assumes that a lock is already held on BeaconState. +func (b *BeaconState) nextSyncCommittee() *ethpb.SyncCommittee { + if !b.hasInnerState() { + return nil + } + + return CopySyncCommittee(b.state.NextSyncCommittee) +} + +// CurrentSyncCommittee of the current sync committee in beacon chain state. +func (b *BeaconState) CurrentSyncCommittee() (*ethpb.SyncCommittee, error) { + if !b.hasInnerState() { + return nil, nil + } + + b.lock.RLock() + defer b.lock.RUnlock() + + if b.state.CurrentSyncCommittee == nil { + return nil, nil + } + + return b.currentSyncCommittee(), nil +} + +// NextSyncCommittee of the next sync committee in beacon chain state. +func (b *BeaconState) NextSyncCommittee() (*ethpb.SyncCommittee, error) { + if !b.hasInnerState() { + return nil, nil + } + + b.lock.RLock() + defer b.lock.RUnlock() + + if b.state.NextSyncCommittee == nil { + return nil, nil + } + + return b.nextSyncCommittee(), nil +} + +// CopySyncCommittee copies the provided sync committee object. 
+func CopySyncCommittee(data *ethpb.SyncCommittee) *ethpb.SyncCommittee {
+	if data == nil {
+		return nil
+	}
+	return &ethpb.SyncCommittee{
+		Pubkeys:         bytesutil.SafeCopy2dBytes(data.Pubkeys),
+		AggregatePubkey: bytesutil.SafeCopyBytes(data.AggregatePubkey),
+	}
+}
diff --git a/beacon-chain/state/v3/getters_test.go b/beacon-chain/state/v3/getters_test.go
new file mode 100644
index 0000000000..6cf112dbcb
--- /dev/null
+++ b/beacon-chain/state/v3/getters_test.go
@@ -0,0 +1,88 @@
+package v3
+
+import (
+	"runtime/debug"
+	"sync"
+	"testing"
+
+	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
+	"github.com/prysmaticlabs/prysm/testing/require"
+)
+
+func TestBeaconState_SlotDataRace(t *testing.T) {
+	headState, err := InitializeFromProto(&ethpb.BeaconStateMerge{Slot: 1})
+	require.NoError(t, err)
+
+	wg := sync.WaitGroup{}
+	wg.Add(2)
+	go func() {
+		require.NoError(t, headState.SetSlot(0))
+		wg.Done()
+	}()
+	go func() {
+		headState.Slot()
+		wg.Done()
+	}()
+
+	wg.Wait()
+}
+
+func TestNilState_NoPanic(t *testing.T) {
+	var st *BeaconState
+	defer func() {
+		if r := recover(); r != nil {
+			t.Errorf("Method panicked when it was not supposed to: %v\n%v\n", r, string(debug.Stack()))
+		}
+	}()
+	// retrieve elements from nil state
+	_ = st.GenesisTime()
+	_ = st.GenesisValidatorRoot()
+	_ = st.GenesisUnixTime()
+	_ = st.GenesisValidatorRoot()
+	_ = st.Slot()
+	_ = st.Fork()
+	_ = st.LatestBlockHeader()
+	_ = st.ParentRoot()
+	_ = st.BlockRoots()
+	_, err := st.BlockRootAtIndex(0)
+	_ = err
+	_ = st.StateRoots()
+	_ = st.HistoricalRoots()
+	_ = st.Eth1Data()
+	_ = st.Eth1DataVotes()
+	_ = st.Eth1DepositIndex()
+	_, err = st.ValidatorAtIndex(0)
+	_ = err
+	_, err = st.ValidatorAtIndexReadOnly(0)
+	_ = err
+	_, _ = st.ValidatorIndexByPubkey([48]byte{})
+	_ = st.PubkeyAtIndex(0)
+	_ = st.NumValidators()
+	_ = st.Balances()
+	_, err = st.BalanceAtIndex(0)
+	_ = err
+	_ = st.BalancesLength()
+	_ = st.RandaoMixes()
+	_, err = st.RandaoMixAtIndex(0)
+	_ = err
+	_ = st.RandaoMixesLength()
+	_ = st.Slashings()
+	_, err = st.CurrentEpochParticipation()
+	_ = err
+	_, err = st.PreviousEpochParticipation()
+	_ = err
+	_ = st.JustificationBits()
+	_ = st.PreviousJustifiedCheckpoint()
+	_ = st.CurrentJustifiedCheckpoint()
+	_ = st.FinalizedCheckpoint()
+	_, err = st.CurrentEpochParticipation()
+	_ = err
+	_, err = st.PreviousEpochParticipation()
+	_ = err
+	_, err = st.InactivityScores()
+	_ = err
+	_, err = st.CurrentSyncCommittee()
+	_ = err
+	_, err = st.NextSyncCommittee()
+	_ = err
+}
diff --git a/beacon-chain/state/v3/getters_validator.go b/beacon-chain/state/v3/getters_validator.go
new file mode 100644
index 0000000000..d764aeaebe
--- /dev/null
+++ b/beacon-chain/state/v3/getters_validator.go
@@ -0,0 +1,328 @@
+package v3
+
+import (
+	"fmt"
+
+	"github.com/pkg/errors"
+	types "github.com/prysmaticlabs/eth2-types"
+	"github.com/prysmaticlabs/prysm/beacon-chain/state"
+	v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
+	"github.com/prysmaticlabs/prysm/encoding/bytesutil"
+	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
+)
+
+// ValidatorIndexOutOfRangeError represents an error scenario where a validator does not exist
+// at a given index in the validator's array.
+type ValidatorIndexOutOfRangeError struct {
+	message string
+}
+
+var (
+	// ErrNilValidatorsInState returns when accessing validators in the state while the state has a
+	// nil slice for the validators field.
+	ErrNilValidatorsInState = errors.New("state has nil validator slice")
+)
+
+// NewValidatorIndexOutOfRangeError creates a new error instance.
+func NewValidatorIndexOutOfRangeError(index types.ValidatorIndex) ValidatorIndexOutOfRangeError {
+	return ValidatorIndexOutOfRangeError{
+		message: fmt.Sprintf("index %d out of range", index),
+	}
+}
+
+// Error returns the underlying error message.
+func (e *ValidatorIndexOutOfRangeError) Error() string {
+	return e.message
+}
+
+// Validators participating in consensus on the beacon chain.
+func (b *BeaconState) Validators() []*ethpb.Validator {
+	if !b.hasInnerState() {
+		return nil
+	}
+	if b.state.Validators == nil {
+		return nil
+	}
+
+	b.lock.RLock()
+	defer b.lock.RUnlock()
+
+	return b.validators()
+}
+
+// validators participating in consensus on the beacon chain.
+// This assumes that a lock is already held on BeaconState.
+func (b *BeaconState) validators() []*ethpb.Validator {
+	if !b.hasInnerState() {
+		return nil
+	}
+	if b.state.Validators == nil {
+		return nil
+	}
+
+	res := make([]*ethpb.Validator, len(b.state.Validators))
+	for i := 0; i < len(res); i++ {
+		val := b.state.Validators[i]
+		if val == nil {
+			continue
+		}
+		res[i] = ethpb.CopyValidator(val)
+	}
+	return res
+}
+
+// references of validators participating in consensus on the beacon chain.
+// This assumes that a lock is already held on BeaconState. This does not
+// copy fully and instead just copies the reference.
+func (b *BeaconState) validatorsReferences() []*ethpb.Validator {
+	if !b.hasInnerState() {
+		return nil
+	}
+	if b.state.Validators == nil {
+		return nil
+	}
+
+	res := make([]*ethpb.Validator, len(b.state.Validators))
+	for i := 0; i < len(res); i++ {
+		validator := b.state.Validators[i]
+		if validator == nil {
+			continue
+		}
+		// copy validator reference instead.
+		res[i] = validator
+	}
+	return res
+}
+
+// ValidatorAtIndex is the validator at the provided index.
+func (b *BeaconState) ValidatorAtIndex(idx types.ValidatorIndex) (*ethpb.Validator, error) {
+	if !b.hasInnerState() {
+		return nil, ErrNilInnerState
+	}
+	if b.state.Validators == nil {
+		return &ethpb.Validator{}, nil
+	}
+	if uint64(len(b.state.Validators)) <= uint64(idx) {
+		e := NewValidatorIndexOutOfRangeError(idx)
+		return nil, &e
+	}
+
+	b.lock.RLock()
+	defer b.lock.RUnlock()
+
+	val := b.state.Validators[idx]
+	return ethpb.CopyValidator(val), nil
+}
+
+// ValidatorAtIndexReadOnly is the validator at the provided index. This method
+// doesn't clone the validator.
+func (b *BeaconState) ValidatorAtIndexReadOnly(idx types.ValidatorIndex) (state.ReadOnlyValidator, error) { + if !b.hasInnerState() { + return nil, ErrNilInnerState + } + if b.state.Validators == nil { + return nil, ErrNilValidatorsInState + } + if uint64(len(b.state.Validators)) <= uint64(idx) { + e := NewValidatorIndexOutOfRangeError(idx) + return nil, &e + } + + b.lock.RLock() + defer b.lock.RUnlock() + + return v1.NewValidator(b.state.Validators[idx]) +} + +// ValidatorIndexByPubkey returns a given validator by its 48-byte public key. +func (b *BeaconState) ValidatorIndexByPubkey(key [48]byte) (types.ValidatorIndex, bool) { + if b == nil || b.valMapHandler == nil || b.valMapHandler.IsNil() { + return 0, false + } + b.lock.RLock() + defer b.lock.RUnlock() + numOfVals := len(b.state.Validators) + + idx, ok := b.valMapHandler.Get(key) + if ok && numOfVals <= int(idx) { + return types.ValidatorIndex(0), false + } + return idx, ok +} + +// PubkeyAtIndex returns the pubkey at the given +// validator index. +func (b *BeaconState) PubkeyAtIndex(idx types.ValidatorIndex) [48]byte { + if !b.hasInnerState() { + return [48]byte{} + } + if uint64(idx) >= uint64(len(b.state.Validators)) { + return [48]byte{} + } + b.lock.RLock() + defer b.lock.RUnlock() + + if b.state.Validators[idx] == nil { + return [48]byte{} + } + return bytesutil.ToBytes48(b.state.Validators[idx].PublicKey) +} + +// NumValidators returns the size of the validator registry. +func (b *BeaconState) NumValidators() int { + if !b.hasInnerState() { + return 0 + } + b.lock.RLock() + defer b.lock.RUnlock() + + return len(b.state.Validators) +} + +// ReadFromEveryValidator reads values from every validator and applies it to the provided function. +// Warning: This method is potentially unsafe, as it exposes the actual validator registry. 
+func (b *BeaconState) ReadFromEveryValidator(f func(idx int, val state.ReadOnlyValidator) error) error { + if !b.hasInnerState() { + return ErrNilInnerState + } + if b.state.Validators == nil { + return errors.New("nil validators in state") + } + b.lock.RLock() + validators := b.state.Validators + b.lock.RUnlock() + + for i, v := range validators { + v, err := v1.NewValidator(v) + if err != nil { + return err + } + if err := f(i, v); err != nil { + return err + } + } + return nil +} + +// Balances of validators participating in consensus on the beacon chain. +func (b *BeaconState) Balances() []uint64 { + if !b.hasInnerState() { + return nil + } + if b.state.Balances == nil { + return nil + } + + b.lock.RLock() + defer b.lock.RUnlock() + + return b.balances() +} + +// balances of validators participating in consensus on the beacon chain. +// This assumes that a lock is already held on BeaconState. +func (b *BeaconState) balances() []uint64 { + if !b.hasInnerState() { + return nil + } + if b.state.Balances == nil { + return nil + } + + res := make([]uint64, len(b.state.Balances)) + copy(res, b.state.Balances) + return res +} + +// BalanceAtIndex of validator with the provided index. +func (b *BeaconState) BalanceAtIndex(idx types.ValidatorIndex) (uint64, error) { + if !b.hasInnerState() { + return 0, ErrNilInnerState + } + if b.state.Balances == nil { + return 0, nil + } + + b.lock.RLock() + defer b.lock.RUnlock() + + if uint64(len(b.state.Balances)) <= uint64(idx) { + return 0, fmt.Errorf("index of %d does not exist", idx) + } + return b.state.Balances[idx], nil +} + +// BalancesLength returns the length of the balances slice. +func (b *BeaconState) BalancesLength() int { + if !b.hasInnerState() { + return 0 + } + if b.state.Balances == nil { + return 0 + } + + b.lock.RLock() + defer b.lock.RUnlock() + + return b.balancesLength() +} + +// Slashings of validators on the beacon chain. 
+func (b *BeaconState) Slashings() []uint64 { + if !b.hasInnerState() { + return nil + } + if b.state.Slashings == nil { + return nil + } + + b.lock.RLock() + defer b.lock.RUnlock() + + return b.slashings() +} + +// slashings of validators on the beacon chain. +// This assumes that a lock is already held on BeaconState. +func (b *BeaconState) slashings() []uint64 { + if !b.hasInnerState() { + return nil + } + if b.state.Slashings == nil { + return nil + } + + res := make([]uint64, len(b.state.Slashings)) + copy(res, b.state.Slashings) + return res +} + +// inactivityScores of validators participating in consensus on the beacon chain. +// This assumes that a lock is already held on BeaconState. +func (b *BeaconState) inactivityScores() []uint64 { + if !b.hasInnerState() { + return nil + } + if b.state.InactivityScores == nil { + return nil + } + + res := make([]uint64, len(b.state.InactivityScores)) + copy(res, b.state.InactivityScores) + return res +} + +// InactivityScores of validators participating in consensus on the beacon chain. 
+func (b *BeaconState) InactivityScores() ([]uint64, error) {
+	if !b.hasInnerState() {
+		return nil, nil
+	}
+	if b.state.InactivityScores == nil {
+		return nil, nil
+	}
+
+	b.lock.RLock()
+	defer b.lock.RUnlock()
+
+	return b.inactivityScores(), nil
+}
diff --git a/beacon-chain/state/v3/getters_validator_test.go b/beacon-chain/state/v3/getters_validator_test.go
new file mode 100644
index 0000000000..019f6dc8b8
--- /dev/null
+++ b/beacon-chain/state/v3/getters_validator_test.go
@@ -0,0 +1,20 @@
+package v3_test
+
+import (
+	"testing"
+
+	v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
+	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
+	"github.com/prysmaticlabs/prysm/testing/assert"
+	"github.com/prysmaticlabs/prysm/testing/require"
+)
+
+func TestBeaconState_ValidatorAtIndexReadOnly_HandlesNilSlice(t *testing.T) {
+	st, err := v1.InitializeFromProtoUnsafe(&ethpb.BeaconState{
+		Validators: nil,
+	})
+	require.NoError(t, err)
+
+	_, err = st.ValidatorAtIndexReadOnly(0)
+	assert.Equal(t, v1.ErrNilValidatorsInState, err)
+}
diff --git a/beacon-chain/state/v3/setters_block.go b/beacon-chain/state/v3/setters_block.go
new file mode 100644
index 0000000000..81fa29f328
--- /dev/null
+++ b/beacon-chain/state/v3/setters_block.go
@@ -0,0 +1,68 @@
+package v3
+
+import (
+	"fmt"
+
+	"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
+	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
+)
+
+// SetLatestBlockHeader in the beacon state.
+func (b *BeaconState) SetLatestBlockHeader(val *ethpb.BeaconBlockHeader) error {
+	if !b.hasInnerState() {
+		return ErrNilInnerState
+	}
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	b.state.LatestBlockHeader = ethpb.CopyBeaconBlockHeader(val)
+	b.markFieldAsDirty(latestBlockHeader)
+	return nil
+}
+
+// SetBlockRoots for the beacon state. Updates the entire
+// list to a new value by overwriting the previous one.
+func (b *BeaconState) SetBlockRoots(val [][]byte) error { + if !b.hasInnerState() { + return ErrNilInnerState + } + b.lock.Lock() + defer b.lock.Unlock() + + b.sharedFieldReferences[blockRoots].MinusRef() + b.sharedFieldReferences[blockRoots] = stateutil.NewRef(1) + + b.state.BlockRoots = val + b.markFieldAsDirty(blockRoots) + b.rebuildTrie[blockRoots] = true + return nil +} + +// UpdateBlockRootAtIndex for the beacon state. Updates the block root +// at a specific index to a new value. +func (b *BeaconState) UpdateBlockRootAtIndex(idx uint64, blockRoot [32]byte) error { + if !b.hasInnerState() { + return ErrNilInnerState + } + if uint64(len(b.state.BlockRoots)) <= idx { + return fmt.Errorf("invalid index provided %d", idx) + } + b.lock.Lock() + defer b.lock.Unlock() + + r := b.state.BlockRoots + if ref := b.sharedFieldReferences[blockRoots]; ref.Refs() > 1 { + // Copy elements in underlying array by reference. + r = make([][]byte, len(b.state.BlockRoots)) + copy(r, b.state.BlockRoots) + ref.MinusRef() + b.sharedFieldReferences[blockRoots] = stateutil.NewRef(1) + } + + r[idx] = blockRoot[:] + b.state.BlockRoots = r + + b.markFieldAsDirty(blockRoots) + b.addDirtyIndices(blockRoots, []uint64{idx}) + return nil +} diff --git a/beacon-chain/state/v3/setters_checkpoint.go b/beacon-chain/state/v3/setters_checkpoint.go new file mode 100644 index 0000000000..601f89dab6 --- /dev/null +++ b/beacon-chain/state/v3/setters_checkpoint.go @@ -0,0 +1,58 @@ +package v3 + +import ( + "github.com/prysmaticlabs/go-bitfield" + ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" +) + +// SetJustificationBits for the beacon state. +func (b *BeaconState) SetJustificationBits(val bitfield.Bitvector4) error { + if !b.hasInnerState() { + return ErrNilInnerState + } + b.lock.Lock() + defer b.lock.Unlock() + + b.state.JustificationBits = val + b.markFieldAsDirty(justificationBits) + return nil +} + +// SetPreviousJustifiedCheckpoint for the beacon state. 
+func (b *BeaconState) SetPreviousJustifiedCheckpoint(val *ethpb.Checkpoint) error { + if !b.hasInnerState() { + return ErrNilInnerState + } + b.lock.Lock() + defer b.lock.Unlock() + + b.state.PreviousJustifiedCheckpoint = val + b.markFieldAsDirty(previousJustifiedCheckpoint) + return nil +} + +// SetCurrentJustifiedCheckpoint for the beacon state. +func (b *BeaconState) SetCurrentJustifiedCheckpoint(val *ethpb.Checkpoint) error { + if !b.hasInnerState() { + return ErrNilInnerState + } + b.lock.Lock() + defer b.lock.Unlock() + + b.state.CurrentJustifiedCheckpoint = val + b.markFieldAsDirty(currentJustifiedCheckpoint) + return nil +} + +// SetFinalizedCheckpoint for the beacon state. +func (b *BeaconState) SetFinalizedCheckpoint(val *ethpb.Checkpoint) error { + if !b.hasInnerState() { + return ErrNilInnerState + } + b.lock.Lock() + defer b.lock.Unlock() + + b.state.FinalizedCheckpoint = val + b.markFieldAsDirty(finalizedCheckpoint) + return nil +} diff --git a/beacon-chain/state/v3/setters_eth1.go b/beacon-chain/state/v3/setters_eth1.go new file mode 100644 index 0000000000..315bafe4b3 --- /dev/null +++ b/beacon-chain/state/v3/setters_eth1.go @@ -0,0 +1,74 @@ +package v3 + +import ( + "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil" + ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" +) + +// SetEth1Data for the beacon state. +func (b *BeaconState) SetEth1Data(val *ethpb.Eth1Data) error { + if !b.hasInnerState() { + return ErrNilInnerState + } + b.lock.Lock() + defer b.lock.Unlock() + + b.state.Eth1Data = val + b.markFieldAsDirty(eth1Data) + return nil +} + +// SetEth1DataVotes for the beacon state. Updates the entire +// list to a new value by overwriting the previous one. 
+func (b *BeaconState) SetEth1DataVotes(val []*ethpb.Eth1Data) error {
+	if !b.hasInnerState() {
+		return ErrNilInnerState
+	}
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	b.sharedFieldReferences[eth1DataVotes].MinusRef()
+	b.sharedFieldReferences[eth1DataVotes] = stateutil.NewRef(1)
+
+	b.state.Eth1DataVotes = val
+	b.markFieldAsDirty(eth1DataVotes)
+	b.rebuildTrie[eth1DataVotes] = true
+	return nil
+}
+
+// SetEth1DepositIndex for the beacon state.
+func (b *BeaconState) SetEth1DepositIndex(val uint64) error {
+	if !b.hasInnerState() {
+		return ErrNilInnerState
+	}
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	b.state.Eth1DepositIndex = val
+	b.markFieldAsDirty(eth1DepositIndex)
+	return nil
+}
+
+// AppendEth1DataVotes for the beacon state. Appends the new value
+// to the end of list.
+func (b *BeaconState) AppendEth1DataVotes(val *ethpb.Eth1Data) error {
+	if !b.hasInnerState() {
+		return ErrNilInnerState
+	}
+	b.lock.Lock()
+	defer b.lock.Unlock()
+
+	votes := b.state.Eth1DataVotes
+	if b.sharedFieldReferences[eth1DataVotes].Refs() > 1 {
+		// Copy elements in underlying array by reference.
+		votes = make([]*ethpb.Eth1Data, len(b.state.Eth1DataVotes))
+		copy(votes, b.state.Eth1DataVotes)
+		b.sharedFieldReferences[eth1DataVotes].MinusRef()
+		b.sharedFieldReferences[eth1DataVotes] = stateutil.NewRef(1)
+	}
+
+	b.state.Eth1DataVotes = append(votes, val)
+	b.markFieldAsDirty(eth1DataVotes)
+	b.addDirtyIndices(eth1DataVotes, []uint64{uint64(len(b.state.Eth1DataVotes) - 1)})
+	return nil
+}
diff --git a/beacon-chain/state/v3/setters_misc.go b/beacon-chain/state/v3/setters_misc.go
new file mode 100644
index 0000000000..27839ff993
--- /dev/null
+++ b/beacon-chain/state/v3/setters_misc.go
@@ -0,0 +1,186 @@
+package v3
+
+import (
+	"github.com/pkg/errors"
+	types "github.com/prysmaticlabs/eth2-types"
+	"github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil"
+	stateTypes "github.com/prysmaticlabs/prysm/beacon-chain/state/types"
+	"github.com/prysmaticlabs/prysm/config/features"
+	"github.com/prysmaticlabs/prysm/crypto/hash"
+	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
+	"google.golang.org/protobuf/proto"
+)
+
+// For our setters, we have a field reference counter through
+// which we can track shared field references. This helps when
+// performing state copies, as we simply copy the reference to the
+// field. When we do need to modify these fields, we
+// perform a full copy of the field. This is true of most of our
+// fields except for the following below.
+// 1) BlockRoots
+// 2) StateRoots
+// 3) Eth1DataVotes
+// 4) RandaoMixes
+// 5) HistoricalRoots
+// 6) CurrentParticipationBits
+// 7) PreviousParticipationBits
+//
+// The fields referred to above are instead copied by reference, where
+// we simply copy the reference to the underlying object instead of the
+// whole object. This is possible due to how we have structured our state
+// as we copy the value on read, so as to ensure the underlying object is
+// not mutated while it is being accessed during a state read.
+ +const ( + // This specifies the limit till which we process all dirty indices for a certain field. + // If we have more dirty indices than the threshold, then we rebuild the whole trie. This + // comes due to the fact that O(alogn) > O(n) beyond a certain value of a. + indicesLimit = 8000 +) + +// SetGenesisTime for the beacon state. +func (b *BeaconState) SetGenesisTime(val uint64) error { + b.lock.Lock() + defer b.lock.Unlock() + + b.state.GenesisTime = val + b.markFieldAsDirty(genesisTime) + return nil +} + +// SetGenesisValidatorRoot for the beacon state. +func (b *BeaconState) SetGenesisValidatorRoot(val []byte) error { + b.lock.Lock() + defer b.lock.Unlock() + + b.state.GenesisValidatorsRoot = val + b.markFieldAsDirty(genesisValidatorRoot) + return nil +} + +// SetSlot for the beacon state. +func (b *BeaconState) SetSlot(val types.Slot) error { + if !b.hasInnerState() { + return ErrNilInnerState + } + b.lock.Lock() + defer b.lock.Unlock() + + b.state.Slot = val + b.markFieldAsDirty(slot) + return nil +} + +// SetFork version for the beacon chain. +func (b *BeaconState) SetFork(val *ethpb.Fork) error { + if !b.hasInnerState() { + return ErrNilInnerState + } + b.lock.Lock() + defer b.lock.Unlock() + + fk, ok := proto.Clone(val).(*ethpb.Fork) + if !ok { + return errors.New("proto.Clone did not return a fork proto") + } + b.state.Fork = fk + b.markFieldAsDirty(fork) + return nil +} + +// SetHistoricalRoots for the beacon state. Updates the entire +// list to a new value by overwriting the previous one. +func (b *BeaconState) SetHistoricalRoots(val [][]byte) error { + if !b.hasInnerState() { + return ErrNilInnerState + } + b.lock.Lock() + defer b.lock.Unlock() + + b.sharedFieldReferences[historicalRoots].MinusRef() + b.sharedFieldReferences[historicalRoots] = stateutil.NewRef(1) + + b.state.HistoricalRoots = val + b.markFieldAsDirty(historicalRoots) + return nil +} + +// AppendHistoricalRoots for the beacon state. 
Appends the new value +// to the the end of list. +func (b *BeaconState) AppendHistoricalRoots(root [32]byte) error { + if !b.hasInnerState() { + return ErrNilInnerState + } + b.lock.Lock() + defer b.lock.Unlock() + + roots := b.state.HistoricalRoots + if b.sharedFieldReferences[historicalRoots].Refs() > 1 { + roots = make([][]byte, len(b.state.HistoricalRoots)) + copy(roots, b.state.HistoricalRoots) + b.sharedFieldReferences[historicalRoots].MinusRef() + b.sharedFieldReferences[historicalRoots] = stateutil.NewRef(1) + } + + b.state.HistoricalRoots = append(roots, root[:]) + b.markFieldAsDirty(historicalRoots) + return nil +} + +// Recomputes the branch up the index in the Merkle trie representation +// of the beacon state. This method performs slice reads and the caller MUST +// hold the lock before calling this method. +func (b *BeaconState) recomputeRoot(idx int) { + hashFunc := hash.CustomSHA256Hasher() + layers := b.merkleLayers + // The merkle tree structure looks as follows: + // [[r1, r2, r3, r4], [parent1, parent2], [root]] + // Using information about the index which changed, idx, we recompute + // only its branch up the tree. + currentIndex := idx + root := b.merkleLayers[0][idx] + for i := 0; i < len(layers)-1; i++ { + isLeft := currentIndex%2 == 0 + neighborIdx := currentIndex ^ 1 + + neighbor := make([]byte, 32) + if layers[i] != nil && len(layers[i]) != 0 && neighborIdx < len(layers[i]) { + neighbor = layers[i][neighborIdx] + } + if isLeft { + parentHash := hashFunc(append(root, neighbor...)) + root = parentHash[:] + } else { + parentHash := hashFunc(append(neighbor, root...)) + root = parentHash[:] + } + parentIdx := currentIndex / 2 + // Update the cached layers at the parent index. 
+ layers[i+1][parentIdx] = root + currentIndex = parentIdx + } + b.merkleLayers = layers +} + +func (b *BeaconState) markFieldAsDirty(field stateTypes.FieldIndex) { + b.dirtyFields[field] = true +} + +// addDirtyIndices adds the relevant dirty field indices, so that they +// can be recomputed. +func (b *BeaconState) addDirtyIndices(index stateTypes.FieldIndex, indices []uint64) { + if b.rebuildTrie[index] { + return + } + // Exit early if balance trie computation isn't enabled. + if !features.Get().EnableBalanceTrieComputation && index == balances { + return + } + totalIndicesLen := len(b.dirtyIndices[index]) + len(indices) + if totalIndicesLen > indicesLimit { + b.rebuildTrie[index] = true + b.dirtyIndices[index] = []uint64{} + } else { + b.dirtyIndices[index] = append(b.dirtyIndices[index], indices...) + } +} diff --git a/beacon-chain/state/v3/setters_participation.go b/beacon-chain/state/v3/setters_participation.go new file mode 100644 index 0000000000..c5aa72c160 --- /dev/null +++ b/beacon-chain/state/v3/setters_participation.go @@ -0,0 +1,89 @@ +package v3 + +import ( + "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil" +) + +// SetPreviousParticipationBits for the beacon state. Updates the entire +// list to a new value by overwriting the previous one. +func (b *BeaconState) SetPreviousParticipationBits(val []byte) error { + if !b.hasInnerState() { + return ErrNilInnerState + } + b.lock.Lock() + defer b.lock.Unlock() + + b.sharedFieldReferences[previousEpochParticipationBits].MinusRef() + b.sharedFieldReferences[previousEpochParticipationBits] = stateutil.NewRef(1) + + b.state.PreviousEpochParticipation = val + b.markFieldAsDirty(previousEpochParticipationBits) + b.rebuildTrie[previousEpochParticipationBits] = true + return nil +} + +// SetCurrentParticipationBits for the beacon state. Updates the entire +// list to a new value by overwriting the previous one. 
+func (b *BeaconState) SetCurrentParticipationBits(val []byte) error { + if !b.hasInnerState() { + return ErrNilInnerState + } + b.lock.Lock() + defer b.lock.Unlock() + + b.sharedFieldReferences[currentEpochParticipationBits].MinusRef() + b.sharedFieldReferences[currentEpochParticipationBits] = stateutil.NewRef(1) + + b.state.CurrentEpochParticipation = val + b.markFieldAsDirty(currentEpochParticipationBits) + b.rebuildTrie[currentEpochParticipationBits] = true + return nil +} + +// AppendCurrentParticipationBits for the beacon state. Appends the new value +// to the the end of list. +func (b *BeaconState) AppendCurrentParticipationBits(val byte) error { + if !b.hasInnerState() { + return ErrNilInnerState + } + b.lock.Lock() + defer b.lock.Unlock() + + participation := b.state.CurrentEpochParticipation + if b.sharedFieldReferences[currentEpochParticipationBits].Refs() > 1 { + // Copy elements in underlying array by reference. + participation = make([]byte, len(b.state.CurrentEpochParticipation)) + copy(participation, b.state.CurrentEpochParticipation) + b.sharedFieldReferences[currentEpochParticipationBits].MinusRef() + b.sharedFieldReferences[currentEpochParticipationBits] = stateutil.NewRef(1) + } + + b.state.CurrentEpochParticipation = append(participation, val) + b.markFieldAsDirty(currentEpochParticipationBits) + b.addDirtyIndices(currentEpochParticipationBits, []uint64{uint64(len(b.state.CurrentEpochParticipation) - 1)}) + return nil +} + +// AppendPreviousParticipationBits for the beacon state. Appends the new value +// to the the end of list. 
+func (b *BeaconState) AppendPreviousParticipationBits(val byte) error { + if !b.hasInnerState() { + return ErrNilInnerState + } + b.lock.Lock() + defer b.lock.Unlock() + + bits := b.state.PreviousEpochParticipation + if b.sharedFieldReferences[previousEpochParticipationBits].Refs() > 1 { + bits = make([]byte, len(b.state.PreviousEpochParticipation)) + copy(bits, b.state.PreviousEpochParticipation) + b.sharedFieldReferences[previousEpochParticipationBits].MinusRef() + b.sharedFieldReferences[previousEpochParticipationBits] = stateutil.NewRef(1) + } + + b.state.PreviousEpochParticipation = append(bits, val) + b.markFieldAsDirty(previousEpochParticipationBits) + b.addDirtyIndices(previousEpochParticipationBits, []uint64{uint64(len(b.state.PreviousEpochParticipation) - 1)}) + + return nil +} diff --git a/beacon-chain/state/v3/setters_randao.go b/beacon-chain/state/v3/setters_randao.go new file mode 100644 index 0000000000..017ca6a57b --- /dev/null +++ b/beacon-chain/state/v3/setters_randao.go @@ -0,0 +1,53 @@ +package v3 + +import ( + "github.com/pkg/errors" + "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil" +) + +// SetRandaoMixes for the beacon state. Updates the entire +// randao mixes to a new value by overwriting the previous one. +func (b *BeaconState) SetRandaoMixes(val [][]byte) error { + if !b.hasInnerState() { + return ErrNilInnerState + } + b.lock.Lock() + defer b.lock.Unlock() + + b.sharedFieldReferences[randaoMixes].MinusRef() + b.sharedFieldReferences[randaoMixes] = stateutil.NewRef(1) + + b.state.RandaoMixes = val + b.markFieldAsDirty(randaoMixes) + b.rebuildTrie[randaoMixes] = true + return nil +} + +// UpdateRandaoMixesAtIndex for the beacon state. Updates the randao mixes +// at a specific index to a new value. 
+func (b *BeaconState) UpdateRandaoMixesAtIndex(idx uint64, val []byte) error { + if !b.hasInnerState() { + return ErrNilInnerState + } + if uint64(len(b.state.RandaoMixes)) <= idx { + return errors.Errorf("invalid index provided %d", idx) + } + b.lock.Lock() + defer b.lock.Unlock() + + mixes := b.state.RandaoMixes + if refs := b.sharedFieldReferences[randaoMixes].Refs(); refs > 1 { + // Copy elements in underlying array by reference. + mixes = make([][]byte, len(b.state.RandaoMixes)) + copy(mixes, b.state.RandaoMixes) + b.sharedFieldReferences[randaoMixes].MinusRef() + b.sharedFieldReferences[randaoMixes] = stateutil.NewRef(1) + } + + mixes[idx] = val + b.state.RandaoMixes = mixes + b.markFieldAsDirty(randaoMixes) + b.addDirtyIndices(randaoMixes, []uint64{idx}) + + return nil +} diff --git a/beacon-chain/state/v3/setters_state.go b/beacon-chain/state/v3/setters_state.go new file mode 100644 index 0000000000..1fee76364e --- /dev/null +++ b/beacon-chain/state/v3/setters_state.go @@ -0,0 +1,41 @@ +package v3 + +import ( + "github.com/pkg/errors" + "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil" +) + +// UpdateStateRootAtIndex for the beacon state. Updates the state root +// at a specific index to a new value. +func (b *BeaconState) UpdateStateRootAtIndex(idx uint64, stateRoot [32]byte) error { + if !b.hasInnerState() { + return ErrNilInnerState + } + + b.lock.RLock() + if uint64(len(b.state.StateRoots)) <= idx { + b.lock.RUnlock() + return errors.Errorf("invalid index provided %d", idx) + } + b.lock.RUnlock() + + b.lock.Lock() + defer b.lock.Unlock() + + // Check if we hold the only reference to the shared state roots slice. + r := b.state.StateRoots + if ref := b.sharedFieldReferences[stateRoots]; ref.Refs() > 1 { + // Copy elements in underlying array by reference. 
+ r = make([][]byte, len(b.state.StateRoots)) + copy(r, b.state.StateRoots) + ref.MinusRef() + b.sharedFieldReferences[stateRoots] = stateutil.NewRef(1) + } + + r[idx] = stateRoot[:] + b.state.StateRoots = r + + b.markFieldAsDirty(stateRoots) + b.addDirtyIndices(stateRoots, []uint64{idx}) + return nil +} diff --git a/beacon-chain/state/v3/setters_sync_committee.go b/beacon-chain/state/v3/setters_sync_committee.go new file mode 100644 index 0000000000..7b5fa8372b --- /dev/null +++ b/beacon-chain/state/v3/setters_sync_committee.go @@ -0,0 +1,31 @@ +package v3 + +import ( + ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" +) + +// SetCurrentSyncCommittee for the beacon state. +func (b *BeaconState) SetCurrentSyncCommittee(val *ethpb.SyncCommittee) error { + if !b.hasInnerState() { + return ErrNilInnerState + } + b.lock.Lock() + defer b.lock.Unlock() + + b.state.CurrentSyncCommittee = val + b.markFieldAsDirty(currentSyncCommittee) + return nil +} + +// SetNextSyncCommittee for the beacon state. +func (b *BeaconState) SetNextSyncCommittee(val *ethpb.SyncCommittee) error { + if !b.hasInnerState() { + return ErrNilInnerState + } + b.lock.Lock() + defer b.lock.Unlock() + + b.state.NextSyncCommittee = val + b.markFieldAsDirty(nextSyncCommittee) + return nil +} diff --git a/beacon-chain/state/v3/setters_validator.go b/beacon-chain/state/v3/setters_validator.go new file mode 100644 index 0000000000..f621ff81b5 --- /dev/null +++ b/beacon-chain/state/v3/setters_validator.go @@ -0,0 +1,265 @@ +package v3 + +import ( + "github.com/pkg/errors" + types "github.com/prysmaticlabs/eth2-types" + "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil" + "github.com/prysmaticlabs/prysm/encoding/bytesutil" + ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" +) + +// SetValidators for the beacon state. Updates the entire +// to a new value by overwriting the previous one. 
+func (b *BeaconState) SetValidators(val []*ethpb.Validator) error { + if !b.hasInnerState() { + return ErrNilInnerState + } + b.lock.Lock() + defer b.lock.Unlock() + + b.state.Validators = val + b.sharedFieldReferences[validators].MinusRef() + b.sharedFieldReferences[validators] = stateutil.NewRef(1) + b.markFieldAsDirty(validators) + b.rebuildTrie[validators] = true + b.valMapHandler = stateutil.NewValMapHandler(b.state.Validators) + return nil +} + +// ApplyToEveryValidator applies the provided callback function to each validator in the +// validator registry. +func (b *BeaconState) ApplyToEveryValidator(f func(idx int, val *ethpb.Validator) (bool, *ethpb.Validator, error)) error { + if !b.hasInnerState() { + return ErrNilInnerState + } + b.lock.Lock() + v := b.state.Validators + if ref := b.sharedFieldReferences[validators]; ref.Refs() > 1 { + v = b.validatorsReferences() + ref.MinusRef() + b.sharedFieldReferences[validators] = stateutil.NewRef(1) + } + b.lock.Unlock() + var changedVals []uint64 + for i, val := range v { + changed, newVal, err := f(i, val) + if err != nil { + return err + } + if changed { + changedVals = append(changedVals, uint64(i)) + v[i] = newVal + } + } + + b.lock.Lock() + defer b.lock.Unlock() + + b.state.Validators = v + b.markFieldAsDirty(validators) + b.addDirtyIndices(validators, changedVals) + + return nil +} + +// UpdateValidatorAtIndex for the beacon state. Updates the validator +// at a specific index to a new value. 
+func (b *BeaconState) UpdateValidatorAtIndex(idx types.ValidatorIndex, val *ethpb.Validator) error { + if !b.hasInnerState() { + return ErrNilInnerState + } + if uint64(len(b.state.Validators)) <= uint64(idx) { + return errors.Errorf("invalid index provided %d", idx) + } + b.lock.Lock() + defer b.lock.Unlock() + + v := b.state.Validators + if ref := b.sharedFieldReferences[validators]; ref.Refs() > 1 { + v = b.validatorsReferences() + ref.MinusRef() + b.sharedFieldReferences[validators] = stateutil.NewRef(1) + } + + v[idx] = val + b.state.Validators = v + b.markFieldAsDirty(validators) + b.addDirtyIndices(validators, []uint64{uint64(idx)}) + + return nil +} + +// SetBalances for the beacon state. Updates the entire +// list to a new value by overwriting the previous one. +func (b *BeaconState) SetBalances(val []uint64) error { + if !b.hasInnerState() { + return ErrNilInnerState + } + b.lock.Lock() + defer b.lock.Unlock() + + b.sharedFieldReferences[balances].MinusRef() + b.sharedFieldReferences[balances] = stateutil.NewRef(1) + + b.state.Balances = val + b.markFieldAsDirty(balances) + b.rebuildTrie[balances] = true + return nil +} + +// UpdateBalancesAtIndex for the beacon state. This method updates the balance +// at a specific index to a new value. +func (b *BeaconState) UpdateBalancesAtIndex(idx types.ValidatorIndex, val uint64) error { + if !b.hasInnerState() { + return ErrNilInnerState + } + if uint64(len(b.state.Balances)) <= uint64(idx) { + return errors.Errorf("invalid index provided %d", idx) + } + b.lock.Lock() + defer b.lock.Unlock() + + bals := b.state.Balances + if b.sharedFieldReferences[balances].Refs() > 1 { + bals = b.balances() + b.sharedFieldReferences[balances].MinusRef() + b.sharedFieldReferences[balances] = stateutil.NewRef(1) + } + + bals[idx] = val + b.state.Balances = bals + b.markFieldAsDirty(balances) + b.addDirtyIndices(balances, []uint64{uint64(idx)}) + return nil +} + +// SetSlashings for the beacon state. 
Updates the entire +// list to a new value by overwriting the previous one. +func (b *BeaconState) SetSlashings(val []uint64) error { + if !b.hasInnerState() { + return ErrNilInnerState + } + b.lock.Lock() + defer b.lock.Unlock() + + b.sharedFieldReferences[slashings].MinusRef() + b.sharedFieldReferences[slashings] = stateutil.NewRef(1) + + b.state.Slashings = val + b.markFieldAsDirty(slashings) + return nil +} + +// UpdateSlashingsAtIndex for the beacon state. Updates the slashings +// at a specific index to a new value. +func (b *BeaconState) UpdateSlashingsAtIndex(idx, val uint64) error { + if !b.hasInnerState() { + return ErrNilInnerState + } + if uint64(len(b.state.Slashings)) <= idx { + return errors.Errorf("invalid index provided %d", idx) + } + b.lock.Lock() + defer b.lock.Unlock() + + s := b.state.Slashings + if b.sharedFieldReferences[slashings].Refs() > 1 { + s = b.slashings() + b.sharedFieldReferences[slashings].MinusRef() + b.sharedFieldReferences[slashings] = stateutil.NewRef(1) + } + + s[idx] = val + + b.state.Slashings = s + + b.markFieldAsDirty(slashings) + return nil +} + +// AppendValidator for the beacon state. Appends the new value +// to the the end of list. +func (b *BeaconState) AppendValidator(val *ethpb.Validator) error { + if !b.hasInnerState() { + return ErrNilInnerState + } + b.lock.Lock() + defer b.lock.Unlock() + + vals := b.state.Validators + if b.sharedFieldReferences[validators].Refs() > 1 { + vals = b.validatorsReferences() + b.sharedFieldReferences[validators].MinusRef() + b.sharedFieldReferences[validators] = stateutil.NewRef(1) + } + + // append validator to slice + b.state.Validators = append(vals, val) + valIdx := types.ValidatorIndex(len(b.state.Validators) - 1) + + b.valMapHandler.Set(bytesutil.ToBytes48(val.PublicKey), valIdx) + + b.markFieldAsDirty(validators) + b.addDirtyIndices(validators, []uint64{uint64(valIdx)}) + return nil +} + +// AppendBalance for the beacon state. 
Appends the new value +// to the the end of list. +func (b *BeaconState) AppendBalance(bal uint64) error { + if !b.hasInnerState() { + return ErrNilInnerState + } + b.lock.Lock() + defer b.lock.Unlock() + + bals := b.state.Balances + if b.sharedFieldReferences[balances].Refs() > 1 { + bals = b.balances() + b.sharedFieldReferences[balances].MinusRef() + b.sharedFieldReferences[balances] = stateutil.NewRef(1) + } + + b.state.Balances = append(bals, bal) + balIdx := len(b.state.Balances) - 1 + b.markFieldAsDirty(balances) + b.addDirtyIndices(balances, []uint64{uint64(balIdx)}) + return nil +} + +// AppendInactivityScore for the beacon state. +func (b *BeaconState) AppendInactivityScore(s uint64) error { + if !b.hasInnerState() { + return ErrNilInnerState + } + b.lock.Lock() + defer b.lock.Unlock() + + scores := b.state.InactivityScores + if b.sharedFieldReferences[inactivityScores].Refs() > 1 { + scores = b.inactivityScores() + b.sharedFieldReferences[inactivityScores].MinusRef() + b.sharedFieldReferences[inactivityScores] = stateutil.NewRef(1) + } + + b.state.InactivityScores = append(scores, s) + b.markFieldAsDirty(inactivityScores) + return nil +} + +// SetInactivityScores for the beacon state. Updates the entire +// list to a new value by overwriting the previous one. 
+func (b *BeaconState) SetInactivityScores(val []uint64) error { + if !b.hasInnerState() { + return ErrNilInnerState + } + b.lock.Lock() + defer b.lock.Unlock() + + b.sharedFieldReferences[inactivityScores].MinusRef() + b.sharedFieldReferences[inactivityScores] = stateutil.NewRef(1) + + b.state.InactivityScores = val + b.markFieldAsDirty(inactivityScores) + return nil +} diff --git a/beacon-chain/state/v3/state_trie.go b/beacon-chain/state/v3/state_trie.go new file mode 100644 index 0000000000..227c7456c5 --- /dev/null +++ b/beacon-chain/state/v3/state_trie.go @@ -0,0 +1,71 @@ +package v3 + +import ( + "github.com/pkg/errors" + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/prysmaticlabs/prysm/beacon-chain/state/fieldtrie" + "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil" + "github.com/prysmaticlabs/prysm/beacon-chain/state/types" + "github.com/prysmaticlabs/prysm/config/params" + ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" + "google.golang.org/protobuf/proto" +) + +var ( + stateCount = promauto.NewGauge(prometheus.GaugeOpts{ + Name: "beacon_state_merge_count", + Help: "Count the number of active beacon state objects.", + }) +) + +// InitializeFromProto the beacon state from a protobuf representation. +func InitializeFromProto(st *ethpb.BeaconStateMerge) (*BeaconState, error) { + return InitializeFromProtoUnsafe(proto.Clone(st).(*ethpb.BeaconStateMerge)) +} + +// InitializeFromProtoUnsafe directly uses the beacon state protobuf pointer +// and sets it as the inner state of the BeaconState type. 
+func InitializeFromProtoUnsafe(st *ethpb.BeaconStateMerge) (*BeaconState, error) { + if st == nil { + return nil, errors.New("received nil state") + } + + fieldCount := params.BeaconConfig().BeaconStateAltairFieldCount + b := &BeaconState{ + state: st, + dirtyFields: make(map[types.FieldIndex]bool, fieldCount), + dirtyIndices: make(map[types.FieldIndex][]uint64, fieldCount), + stateFieldLeaves: make(map[types.FieldIndex]*fieldtrie.FieldTrie, fieldCount), + sharedFieldReferences: make(map[types.FieldIndex]*stateutil.Reference, 11), + rebuildTrie: make(map[types.FieldIndex]bool, fieldCount), + valMapHandler: stateutil.NewValMapHandler(st.Validators), + } + + var err error + for i := 0; i < fieldCount; i++ { + b.dirtyFields[types.FieldIndex(i)] = true + b.rebuildTrie[types.FieldIndex(i)] = true + b.dirtyIndices[types.FieldIndex(i)] = []uint64{} + b.stateFieldLeaves[types.FieldIndex(i)], err = fieldtrie.NewFieldTrie(types.FieldIndex(i), types.BasicArray, nil, 0) + if err != nil { + return nil, err + } + } + + // Initialize field reference tracking for shared data. + b.sharedFieldReferences[randaoMixes] = stateutil.NewRef(1) + b.sharedFieldReferences[stateRoots] = stateutil.NewRef(1) + b.sharedFieldReferences[blockRoots] = stateutil.NewRef(1) + b.sharedFieldReferences[previousEpochParticipationBits] = stateutil.NewRef(1) // New in Altair. + b.sharedFieldReferences[currentEpochParticipationBits] = stateutil.NewRef(1) // New in Altair. + b.sharedFieldReferences[slashings] = stateutil.NewRef(1) + b.sharedFieldReferences[eth1DataVotes] = stateutil.NewRef(1) + b.sharedFieldReferences[validators] = stateutil.NewRef(1) + b.sharedFieldReferences[balances] = stateutil.NewRef(1) + b.sharedFieldReferences[inactivityScores] = stateutil.NewRef(1) // New in Altair. 
+ b.sharedFieldReferences[historicalRoots] = stateutil.NewRef(1) + + stateCount.Inc() + return b, nil +} diff --git a/beacon-chain/state/v3/types.go b/beacon-chain/state/v3/types.go index 5f83397f0a..0859996972 100644 --- a/beacon-chain/state/v3/types.go +++ b/beacon-chain/state/v3/types.go @@ -22,9 +22,36 @@ func init() { // Initialize the composite arrays. fieldMap[types.Eth1DataVotes] = types.CompositeArray fieldMap[types.Validators] = types.CompositeArray + fieldMap[types.Balances] = types.CompressedArray } -// TODO: Add field Aliases for values from the types package. It'll come in part 2. +// Field Aliases for values from the types package. +const ( + genesisTime = types.GenesisTime + genesisValidatorRoot = types.GenesisValidatorRoot + slot = types.Slot + fork = types.Fork + latestBlockHeader = types.LatestBlockHeader + blockRoots = types.BlockRoots + stateRoots = types.StateRoots + historicalRoots = types.HistoricalRoots + eth1Data = types.Eth1Data + eth1DataVotes = types.Eth1DataVotes + eth1DepositIndex = types.Eth1DepositIndex + validators = types.Validators + balances = types.Balances + randaoMixes = types.RandaoMixes + slashings = types.Slashings + previousEpochParticipationBits = types.PreviousEpochParticipationBits + currentEpochParticipationBits = types.CurrentEpochParticipationBits + justificationBits = types.JustificationBits + previousJustifiedCheckpoint = types.PreviousJustifiedCheckpoint + currentJustifiedCheckpoint = types.CurrentJustifiedCheckpoint + finalizedCheckpoint = types.FinalizedCheckpoint + inactivityScores = types.InactivityScores + currentSyncCommittee = types.CurrentSyncCommittee + nextSyncCommittee = types.NextSyncCommittee +) // fieldMap keeps track of each field // to its corresponding data type. 
From d4a420ddfd3aafbd267b7e041901aacdc774eff2 Mon Sep 17 00:00:00 2001 From: Potuz Date: Mon, 22 Nov 2021 18:20:31 -0300 Subject: [PATCH 14/45] Monitor blocks (#9910) * Add proposer logging to validator monitor Co-authored-by: terence tsao --- beacon-chain/monitor/metrics.go | 12 ++- .../monitor/process_attestation_test.go | 5 ++ beacon-chain/monitor/process_block.go | 68 ++++++++++++++++ beacon-chain/monitor/process_block_test.go | 77 +++++++++++++++++++ beacon-chain/monitor/service.go | 1 + 5 files changed, 162 insertions(+), 1 deletion(-) diff --git a/beacon-chain/monitor/metrics.go b/beacon-chain/monitor/metrics.go index c9bab3532f..61d50a1cbb 100644 --- a/beacon-chain/monitor/metrics.go +++ b/beacon-chain/monitor/metrics.go @@ -55,7 +55,17 @@ var ( "validator_index", }, ) - + // proposedSlotsCounter used to track proposed blocks + proposedSlotsCounter = promauto.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "monitor", + Name: "proposed_slots_total", + Help: "Number of proposed blocks included", + }, + []string{ + "validator_index", + }, + ) // aggregationCounter used to track aggregations aggregationCounter = promauto.NewCounterVec( prometheus.CounterOpts{ diff --git a/beacon-chain/monitor/process_attestation_test.go b/beacon-chain/monitor/process_attestation_test.go index f87d8929e5..d991992a66 100644 --- a/beacon-chain/monitor/process_attestation_test.go +++ b/beacon-chain/monitor/process_attestation_test.go @@ -25,6 +25,7 @@ func setupService(t *testing.T) *Service { 1: nil, 2: nil, 12: nil, + 15: nil, } latestPerformance := map[types.ValidatorIndex]ValidatorLatestPerformance{ 1: { @@ -36,12 +37,16 @@ func setupService(t *testing.T) *Service { 12: { balance: 31900000000, }, + 15: { + balance: 31900000000, + }, } aggregatedPerformance := map[types.ValidatorIndex]ValidatorAggregatedPerformance{ 1: {}, 2: {}, 12: {}, + 15: {}, } return &Service{ diff --git a/beacon-chain/monitor/process_block.go b/beacon-chain/monitor/process_block.go index 
eeca75b4a4..b2edb8bf67 100644 --- a/beacon-chain/monitor/process_block.go +++ b/beacon-chain/monitor/process_block.go @@ -1,15 +1,83 @@ package monitor import ( + "context" "fmt" types "github.com/prysmaticlabs/eth2-types" "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks" + "github.com/prysmaticlabs/prysm/beacon-chain/state" "github.com/prysmaticlabs/prysm/encoding/bytesutil" "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block" "github.com/sirupsen/logrus" ) +// processBlock handles the cases when +// 1) A block was proposed by one of our tracked validators +// 2) An attestation by one of our tracked validators was included +// 3) An Exit by one of our validators was included +// 4) A Slashing by one of our tracked validators was included +// 5) A Sync Committe Contribution by one of our tracked validators was included +func (s *Service) processBlock(ctx context.Context, b block.SignedBeaconBlock) { + if b == nil || b.Block() == nil { + return + } + blk := b.Block() + + s.processSlashings(blk) + s.processExitsFromBlock(blk) + + root, err := blk.HashTreeRoot() + if err != nil { + log.WithError(err).Error("Could not compute block's hash tree root") + return + } + state := s.config.StateGen.StateByRootIfCachedNoCopy(root) + if state == nil { + log.WithField("BeaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug( + "Skipping block collection due to state not found in cache") + return + } + + s.processProposedBlock(state, root, blk) + s.processAttestations(ctx, state, blk) +} + +// processProposedBlock logs the event that one of our tracked validators proposed a block that was included +func (s *Service) processProposedBlock(state state.BeaconState, root [32]byte, blk block.BeaconBlock) { + if s.TrackedIndex(blk.ProposerIndex()) { + // update metrics + proposedSlotsCounter.WithLabelValues(fmt.Sprintf("%d", blk.ProposerIndex())).Inc() + + // update the performance map + balance, err := state.BalanceAtIndex(blk.ProposerIndex()) + if err != 
nil { + log.WithError(err).Error("Could not get balance") + return + } + + latestPerf := s.latestPerformance[blk.ProposerIndex()] + balanceChg := balance - latestPerf.balance + latestPerf.balanceChange = balanceChg + latestPerf.balance = balance + s.latestPerformance[blk.ProposerIndex()] = latestPerf + + aggPerf := s.aggregatedPerformance[blk.ProposerIndex()] + aggPerf.totalProposedCount++ + s.aggregatedPerformance[blk.ProposerIndex()] = aggPerf + + log.WithFields(logrus.Fields{ + "ProposerIndex": blk.ProposerIndex(), + "Slot": blk.Slot(), + "Version": blk.Version(), + "ParentRoot": fmt.Sprintf("%#x", bytesutil.Trunc(blk.ParentRoot())), + "BlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc(root[:])), + "NewBalance": balance, + "BalanceChange": balanceChg, + }).Info("Proposed block was included") + } +} + // processSlashings logs the event of one of our tracked validators was slashed func (s *Service) processSlashings(blk block.BeaconBlock) { for _, slashing := range blk.Body().ProposerSlashings() { diff --git a/beacon-chain/monitor/process_block_test.go b/beacon-chain/monitor/process_block_test.go index 10290b576f..5c9c3def08 100644 --- a/beacon-chain/monitor/process_block_test.go +++ b/beacon-chain/monitor/process_block_test.go @@ -1,10 +1,13 @@ package monitor import ( + "context" + "fmt" "testing" types "github.com/prysmaticlabs/eth2-types" "github.com/prysmaticlabs/prysm/config/params" + "github.com/prysmaticlabs/prysm/encoding/bytesutil" ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper" "github.com/prysmaticlabs/prysm/testing/require" @@ -129,3 +132,77 @@ func TestProcessSlashings(t *testing.T) { }) } } + +func TestProcessProposedBlock(t *testing.T) { + tests := []struct { + name string + block *ethpb.BeaconBlock + wantedErr string + }{ + { + name: "Block proposed by tracked validator", + block: ðpb.BeaconBlock{ + Slot: 6, + ProposerIndex: 12, + ParentRoot: 
bytesutil.PadTo([]byte("hello-world"), 32), + StateRoot: bytesutil.PadTo([]byte("state-world"), 32), + }, + wantedErr: "\"Proposed block was included\" BalanceChange=100000000 BlockRoot=0x68656c6c6f2d NewBalance=32000000000 ParentRoot=0x68656c6c6f2d ProposerIndex=12 Slot=6 Version=0 prefix=monitor", + }, + { + name: "Block proposed by untracked validator", + block: ðpb.BeaconBlock{ + Slot: 6, + ProposerIndex: 13, + ParentRoot: bytesutil.PadTo([]byte("hello-world"), 32), + StateRoot: bytesutil.PadTo([]byte("state-world"), 32), + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + hook := logTest.NewGlobal() + s := setupService(t) + beaconState, _ := util.DeterministicGenesisState(t, 256) + root := [32]byte{} + copy(root[:], "hello-world") + s.processProposedBlock(beaconState, root, wrapper.WrappedPhase0BeaconBlock(tt.block)) + if tt.wantedErr != "" { + require.LogsContain(t, hook, tt.wantedErr) + } else { + require.LogsDoNotContain(t, hook, "included") + } + }) + } + +} + +func TestProcessBlock_ProposerAndSlashedTrackedVals(t *testing.T) { + hook := logTest.NewGlobal() + ctx := context.Background() + s := setupService(t) + genesis, keys := util.DeterministicGenesisState(t, 64) + genConfig := util.DefaultBlockGenConfig() + genConfig.NumProposerSlashings = 1 + b, err := util.GenerateFullBlock(genesis, keys, genConfig, 1) + idx := b.Block.Body.ProposerSlashings[0].Header_1.Header.ProposerIndex + if !s.TrackedIndex(idx) { + s.config.TrackedValidators[idx] = nil + s.latestPerformance[idx] = ValidatorLatestPerformance{ + balance: 31900000000, + } + s.aggregatedPerformance[idx] = ValidatorAggregatedPerformance{} + } + + require.NoError(t, err) + root, err := b.GetBlock().HashTreeRoot() + require.NoError(t, err) + require.NoError(t, s.config.StateGen.SaveState(ctx, root, genesis)) + wanted1 := fmt.Sprintf("\"Proposed block was included\" BalanceChange=100000000 BlockRoot=%#x NewBalance=32000000000 ParentRoot=0x67a9fe4d0d8d ProposerIndex=15 
Slot=1 Version=0 prefix=monitor", bytesutil.Trunc(root[:])) + wanted2 := fmt.Sprintf("\"Proposer slashing was included\" ProposerIndex=%d Root1=0x000100000000 Root2=0x000200000000 SlashingSlot=0 Slot:=1 prefix=monitor", idx) + wrapped := wrapper.WrappedPhase0SignedBeaconBlock(b) + s.processBlock(ctx, wrapped) + require.LogsContain(t, hook, wanted1) + require.LogsContain(t, hook, wanted2) +} diff --git a/beacon-chain/monitor/service.go b/beacon-chain/monitor/service.go index 39746d43a1..1e9c1c6f6c 100644 --- a/beacon-chain/monitor/service.go +++ b/beacon-chain/monitor/service.go @@ -25,6 +25,7 @@ type ValidatorAggregatedPerformance struct { totalCorrectSource uint64 totalCorrectTarget uint64 totalCorrectHead uint64 + totalProposedCount uint64 totalAggregations uint64 } From 94fd99f5cd92d84895789b6e7fb0b93848ea148d Mon Sep 17 00:00:00 2001 From: terence tsao Date: Mon, 22 Nov 2021 15:56:23 -0800 Subject: [PATCH 15/45] Add getters and setters for beacon state v3 (part 2) (#9916) --- beacon-chain/state/v1/unsupported_getters.go | 5 ++++ beacon-chain/state/v1/unsupported_setters.go | 5 ++++ beacon-chain/state/v2/deprecated_getters.go | 5 ++++ beacon-chain/state/v2/deprecated_setters.go | 5 ++++ beacon-chain/state/v3/BUILD.bazel | 2 ++ .../state/v3/getters_payload_header.go | 30 +++++++++++++++++++ .../state/v3/setters_payload_header.go | 16 ++++++++++ beacon-chain/state/v3/types.go | 1 + proto/prysm/v1alpha1/cloners.go | 23 ++++++++++++++ proto/prysm/v1alpha1/cloners_test.go | 29 ++++++++++++++++++ 10 files changed, 121 insertions(+) create mode 100644 beacon-chain/state/v3/getters_payload_header.go create mode 100644 beacon-chain/state/v3/setters_payload_header.go diff --git a/beacon-chain/state/v1/unsupported_getters.go b/beacon-chain/state/v1/unsupported_getters.go index 00030e837c..07ad3bd2b1 100644 --- a/beacon-chain/state/v1/unsupported_getters.go +++ b/beacon-chain/state/v1/unsupported_getters.go @@ -29,3 +29,8 @@ func (b *BeaconState) CurrentSyncCommittee() 
(*ethpb.SyncCommittee, error) { func (b *BeaconState) NextSyncCommittee() (*ethpb.SyncCommittee, error) { return nil, errors.New("NextSyncCommittee is not supported for phase 0 beacon state") } + +// LatestExecutionPayloadHeader is not supported for phase 0 beacon state. +func (b *BeaconState) LatestExecutionPayloadHeader() (*ethpb.ExecutionPayloadHeader, error) { + return nil, errors.New("LatestExecutionPayloadHeader is not supported for phase 0 beacon state") +} diff --git a/beacon-chain/state/v1/unsupported_setters.go b/beacon-chain/state/v1/unsupported_setters.go index 57cacdf3b4..c4e3b4a63d 100644 --- a/beacon-chain/state/v1/unsupported_setters.go +++ b/beacon-chain/state/v1/unsupported_setters.go @@ -44,3 +44,8 @@ func (b *BeaconState) SetCurrentParticipationBits(val []byte) error { func (b *BeaconState) SetInactivityScores(val []uint64) error { return errors.New("SetInactivityScores is not supported for phase 0 beacon state") } + +// SetLatestExecutionPayloadHeader is not supported for phase 0 beacon state. +func (b *BeaconState) SetLatestExecutionPayloadHeader(val *ethpb.ExecutionPayloadHeader) error { + return errors.New("SetLatestExecutionPayloadHeader is not supported for phase 0 beacon state") +} diff --git a/beacon-chain/state/v2/deprecated_getters.go b/beacon-chain/state/v2/deprecated_getters.go index 087b01a7f3..46caf83d0d 100644 --- a/beacon-chain/state/v2/deprecated_getters.go +++ b/beacon-chain/state/v2/deprecated_getters.go @@ -14,3 +14,8 @@ func (b *BeaconState) PreviousEpochAttestations() ([]*ethpb.PendingAttestation, func (b *BeaconState) CurrentEpochAttestations() ([]*ethpb.PendingAttestation, error) { return nil, errors.New("CurrentEpochAttestations is not supported for hard fork 1 beacon state") } + +// LatestExecutionPayloadHeader is not supported for hard fork 1 beacon state. 
+func (b *BeaconState) LatestExecutionPayloadHeader() (*ethpb.ExecutionPayloadHeader, error) { + return nil, errors.New("LatestExecutionPayloadHeader is not supported for hard fork 1 beacon state") +} diff --git a/beacon-chain/state/v2/deprecated_setters.go b/beacon-chain/state/v2/deprecated_setters.go index 61c18ab15e..7125a7cd2c 100644 --- a/beacon-chain/state/v2/deprecated_setters.go +++ b/beacon-chain/state/v2/deprecated_setters.go @@ -29,3 +29,8 @@ func (b *BeaconState) AppendPreviousEpochAttestations(val *ethpb.PendingAttestat func (b *BeaconState) RotateAttestations() error { return errors.New("RotateAttestations is not supported for hard fork 1 beacon state") } + +// SetLatestExecutionPayloadHeader is not supported for hard fork 1 beacon state. +func (b *BeaconState) SetLatestExecutionPayloadHeader(val *ethpb.ExecutionPayloadHeader) error { + return errors.New("SetLatestExecutionPayloadHeader is not supported for hard fork 1 beacon state") +} diff --git a/beacon-chain/state/v3/BUILD.bazel b/beacon-chain/state/v3/BUILD.bazel index 52301778fe..32320e1809 100644 --- a/beacon-chain/state/v3/BUILD.bazel +++ b/beacon-chain/state/v3/BUILD.bazel @@ -14,6 +14,7 @@ go_library( "getters_eth1.go", "getters_misc.go", "getters_participation.go", + "getters_payload_header.go", "getters_randao.go", "getters_state.go", "getters_sync_committee.go", @@ -23,6 +24,7 @@ go_library( "setters_eth1.go", "setters_misc.go", "setters_participation.go", + "setters_payload_header.go", "setters_randao.go", "setters_state.go", "setters_sync_committee.go", diff --git a/beacon-chain/state/v3/getters_payload_header.go b/beacon-chain/state/v3/getters_payload_header.go new file mode 100644 index 0000000000..bfbd893f25 --- /dev/null +++ b/beacon-chain/state/v3/getters_payload_header.go @@ -0,0 +1,30 @@ +package v3 + +import ( + ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" +) + +// LatestExecutionPayloadHeader of the beacon state. 
+func (b *BeaconState) LatestExecutionPayloadHeader() (*ethpb.ExecutionPayloadHeader, error) { + if !b.hasInnerState() { + return nil, nil + } + if b.state.LatestExecutionPayloadHeader == nil { + return nil, nil + } + + b.lock.RLock() + defer b.lock.RUnlock() + + return b.latestExecutionPayloadHeader(), nil +} + +// latestExecutionPayloadHeader of the beacon state. +// This assumes that a lock is already held on BeaconState. +func (b *BeaconState) latestExecutionPayloadHeader() *ethpb.ExecutionPayloadHeader { + if !b.hasInnerState() { + return nil + } + + return ethpb.CopyExecutionPayloadHeader(b.state.LatestExecutionPayloadHeader) +} diff --git a/beacon-chain/state/v3/setters_payload_header.go b/beacon-chain/state/v3/setters_payload_header.go new file mode 100644 index 0000000000..516db0f50f --- /dev/null +++ b/beacon-chain/state/v3/setters_payload_header.go @@ -0,0 +1,16 @@ +package v3 + +import ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" + +// SetLatestExecutionPayloadHeader for the beacon state. 
+func (b *BeaconState) SetLatestExecutionPayloadHeader(val *ethpb.ExecutionPayloadHeader) error { + if !b.hasInnerState() { + return ErrNilInnerState + } + b.lock.Lock() + defer b.lock.Unlock() + + b.state.LatestExecutionPayloadHeader = val + b.markFieldAsDirty(latestExecutionPayloadHeader) + return nil +} diff --git a/beacon-chain/state/v3/types.go b/beacon-chain/state/v3/types.go index 0859996972..b4dced110b 100644 --- a/beacon-chain/state/v3/types.go +++ b/beacon-chain/state/v3/types.go @@ -51,6 +51,7 @@ const ( inactivityScores = types.InactivityScores currentSyncCommittee = types.CurrentSyncCommittee nextSyncCommittee = types.NextSyncCommittee + latestExecutionPayloadHeader = types.LatestExecutionPayloadHeader ) // fieldMap keeps track of each field diff --git a/proto/prysm/v1alpha1/cloners.go b/proto/prysm/v1alpha1/cloners.go index 4a63ca2862..2e8e4d7823 100644 --- a/proto/prysm/v1alpha1/cloners.go +++ b/proto/prysm/v1alpha1/cloners.go @@ -376,3 +376,26 @@ func CopySyncAggregate(a *SyncAggregate) *SyncAggregate { SyncCommitteeSignature: bytesutil.SafeCopyBytes(a.SyncCommitteeSignature), } } + +// CopyExecutionPayloadHeader copies the provided execution payload object. 
+func CopyExecutionPayloadHeader(payload *ExecutionPayloadHeader) *ExecutionPayloadHeader { + if payload == nil { + return nil + } + return &ExecutionPayloadHeader{ + ParentHash: bytesutil.SafeCopyBytes(payload.ParentHash), + Coinbase: bytesutil.SafeCopyBytes(payload.Coinbase), + StateRoot: bytesutil.SafeCopyBytes(payload.StateRoot), + ReceiptRoot: bytesutil.SafeCopyBytes(payload.ReceiptRoot), + LogsBloom: bytesutil.SafeCopyBytes(payload.LogsBloom), + Random: bytesutil.SafeCopyBytes(payload.Random), + BlockNumber: payload.BlockNumber, + GasLimit: payload.GasLimit, + GasUsed: payload.GasUsed, + Timestamp: payload.Timestamp, + BaseFeePerGas: bytesutil.SafeCopyBytes(payload.BaseFeePerGas), + ExtraData: bytesutil.SafeCopyBytes(payload.ExtraData), + BlockHash: bytesutil.SafeCopyBytes(payload.BlockHash), + TransactionsRoot: bytesutil.SafeCopyBytes(payload.TransactionsRoot), + } +} diff --git a/proto/prysm/v1alpha1/cloners_test.go b/proto/prysm/v1alpha1/cloners_test.go index 21b59e155b..3fb20fd07b 100644 --- a/proto/prysm/v1alpha1/cloners_test.go +++ b/proto/prysm/v1alpha1/cloners_test.go @@ -308,6 +308,16 @@ func TestCopyPendingAttestationSlice(t *testing.T) { } } +func TestCopyPayloadHeader(t *testing.T) { + p := genPayloadHeader() + + got := v1alpha1.CopyExecutionPayloadHeader(p) + if !reflect.DeepEqual(got, p) { + t.Errorf("CopyExecutionPayloadHeader() = %v, want %v", got, p) + } + assert.NotEmpty(t, got, "Copied execution payload header has empty fields") +} + func bytes() []byte { b := make([]byte, 32) _, err := rand.Read(b) @@ -572,3 +582,22 @@ func genSyncCommitteeMessage() *v1alpha1.SyncCommitteeMessage { Signature: bytes(), } } + +func genPayloadHeader() *v1alpha1.ExecutionPayloadHeader { + return &v1alpha1.ExecutionPayloadHeader{ + ParentHash: bytes(), + Coinbase: bytes(), + StateRoot: bytes(), + ReceiptRoot: bytes(), + LogsBloom: bytes(), + Random: bytes(), + BlockNumber: 1, + GasLimit: 2, + GasUsed: 3, + Timestamp: 4, + ExtraData: bytes(), + BaseFeePerGas: 
bytes(), + BlockHash: bytes(), + TransactionsRoot: bytes(), + } +} From 6f20d17d15b689cee121f242a07a059cbeab8139 Mon Sep 17 00:00:00 2001 From: Nishant Das Date: Wed, 24 Nov 2021 00:57:06 +0800 Subject: [PATCH 16/45] Rename To Signature Batch (#9926) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: RadosÅ‚aw Kapka --- beacon-chain/blockchain/process_block.go | 4 +- beacon-chain/core/blocks/attestation_test.go | 8 ++-- beacon-chain/core/blocks/header_test.go | 2 +- beacon-chain/core/blocks/randao_test.go | 2 +- beacon-chain/core/blocks/signature.go | 44 +++++++++---------- beacon-chain/core/signing/signing_root.go | 12 ++--- .../transition/transition_no_verify_sig.go | 10 ++--- beacon-chain/sync/batch_verifier.go | 4 +- beacon-chain/sync/batch_verifier_test.go | 12 ++--- beacon-chain/sync/validate_aggregate_proof.go | 10 ++--- .../sync/validate_beacon_attestation.go | 2 +- .../sync/validate_sync_committee_message.go | 2 +- .../sync/validate_sync_contribution_proof.go | 6 +-- crypto/bls/BUILD.bazel | 4 +- .../{signature_set.go => signature_batch.go} | 24 +++++----- ...re_set_test.go => signature_batch_test.go} | 6 +-- .../shared/altair/operations/attestation.go | 2 +- .../shared/phase0/operations/attestation.go | 2 +- 18 files changed, 78 insertions(+), 78 deletions(-) rename crypto/bls/{signature_set.go => signature_batch.go} (64%) rename crypto/bls/{signature_set_test.go => signature_batch_test.go} (92%) diff --git a/beacon-chain/blockchain/process_block.go b/beacon-chain/blockchain/process_block.go index 1a26b9438a..3f8300253a 100644 --- a/beacon-chain/blockchain/process_block.go +++ b/beacon-chain/blockchain/process_block.go @@ -260,12 +260,12 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []block.SignedBeaconBlo jCheckpoints := make([]*ethpb.Checkpoint, len(blks)) fCheckpoints := make([]*ethpb.Checkpoint, len(blks)) - sigSet := &bls.SignatureSet{ + sigSet := &bls.SignatureBatch{ Signatures: 
[][]byte{}, PublicKeys: []bls.PublicKey{}, Messages: [][32]byte{}, } - var set *bls.SignatureSet + var set *bls.SignatureBatch boundaries := make(map[[32]byte]state.BeaconState) for i, b := range blks { set, preState, err = transition.ExecuteStateTransitionNoVerifyAnySig(ctx, preState, b) diff --git a/beacon-chain/core/blocks/attestation_test.go b/beacon-chain/core/blocks/attestation_test.go index a14763b6cd..c4a7be1acd 100644 --- a/beacon-chain/core/blocks/attestation_test.go +++ b/beacon-chain/core/blocks/attestation_test.go @@ -358,7 +358,7 @@ func TestValidateIndexedAttestation_BadAttestationsSignatureSet(t *testing.T) { } want := "nil or missing indexed attestation data" - _, err := blocks.AttestationSignatureSet(context.Background(), beaconState, atts) + _, err := blocks.AttestationSignatureBatch(context.Background(), beaconState, atts) assert.ErrorContains(t, want, err) atts = []*ethpb.Attestation{} @@ -378,7 +378,7 @@ func TestValidateIndexedAttestation_BadAttestationsSignatureSet(t *testing.T) { } want = "expected non-empty attesting indices" - _, err = blocks.AttestationSignatureSet(context.Background(), beaconState, atts) + _, err = blocks.AttestationSignatureBatch(context.Background(), beaconState, atts) assert.ErrorContains(t, want, err) } @@ -502,7 +502,7 @@ func TestRetrieveAttestationSignatureSet_VerifiesMultipleAttestations(t *testing } att2.Signature = bls.AggregateSignatures(sigs).Marshal() - set, err := blocks.AttestationSignatureSet(ctx, st, []*ethpb.Attestation{att1, att2}) + set, err := blocks.AttestationSignatureBatch(ctx, st, []*ethpb.Attestation{att1, att2}) require.NoError(t, err) verified, err := set.Verify() require.NoError(t, err) @@ -566,6 +566,6 @@ func TestRetrieveAttestationSignatureSet_AcrossFork(t *testing.T) { } att2.Signature = bls.AggregateSignatures(sigs).Marshal() - _, err = blocks.AttestationSignatureSet(ctx, st, []*ethpb.Attestation{att1, att2}) + _, err = blocks.AttestationSignatureBatch(ctx, st, 
[]*ethpb.Attestation{att1, att2}) require.NoError(t, err) } diff --git a/beacon-chain/core/blocks/header_test.go b/beacon-chain/core/blocks/header_test.go index 904cd4f926..4725236af1 100644 --- a/beacon-chain/core/blocks/header_test.go +++ b/beacon-chain/core/blocks/header_test.go @@ -321,7 +321,7 @@ func TestBlockSignatureSet_OK(t *testing.T) { validators[proposerIdx].PublicKey = priv.PublicKey().Marshal() err = state.UpdateValidatorAtIndex(proposerIdx, validators[proposerIdx]) require.NoError(t, err) - set, err := blocks.BlockSignatureSet(state, block.Block.ProposerIndex, block.Signature, block.Block.HashTreeRoot) + set, err := blocks.BlockSignatureBatch(state, block.Block.ProposerIndex, block.Signature, block.Block.HashTreeRoot) require.NoError(t, err) verified, err := set.Verify() diff --git a/beacon-chain/core/blocks/randao_test.go b/beacon-chain/core/blocks/randao_test.go index 7139cb94eb..758490458a 100644 --- a/beacon-chain/core/blocks/randao_test.go +++ b/beacon-chain/core/blocks/randao_test.go @@ -82,7 +82,7 @@ func TestRandaoSignatureSet_OK(t *testing.T) { }, } - set, err := blocks.RandaoSignatureSet(context.Background(), beaconState, block.Body.RandaoReveal) + set, err := blocks.RandaoSignatureBatch(context.Background(), beaconState, block.Body.RandaoReveal) require.NoError(t, err) verified, err := set.Verify() require.NoError(t, err) diff --git a/beacon-chain/core/blocks/signature.go b/beacon-chain/core/blocks/signature.go index ced47060d5..4ca314a0ee 100644 --- a/beacon-chain/core/blocks/signature.go +++ b/beacon-chain/core/blocks/signature.go @@ -18,8 +18,8 @@ import ( "github.com/prysmaticlabs/prysm/time/slots" ) -// retrieves the signature set from the raw data, public key,signature and domain provided. -func signatureSet(signedData, pub, signature, domain []byte) (*bls.SignatureSet, error) { +// retrieves the signature batch from the raw data, public key,signature and domain provided. 
+func signatureBatch(signedData, pub, signature, domain []byte) (*bls.SignatureBatch, error) { publicKey, err := bls.PublicKeyFromBytes(pub) if err != nil { return nil, errors.Wrap(err, "could not convert bytes to public key") @@ -32,7 +32,7 @@ func signatureSet(signedData, pub, signature, domain []byte) (*bls.SignatureSet, if err != nil { return nil, errors.Wrap(err, "could not hash container") } - return &bls.SignatureSet{ + return &bls.SignatureBatch{ Signatures: [][]byte{signature}, PublicKeys: []bls.PublicKey{publicKey}, Messages: [][32]byte{root}, @@ -41,7 +41,7 @@ func signatureSet(signedData, pub, signature, domain []byte) (*bls.SignatureSet, // verifies the signature from the raw data, public key and domain provided. func verifySignature(signedData, pub, signature, domain []byte) error { - set, err := signatureSet(signedData, pub, signature, domain) + set, err := signatureBatch(signedData, pub, signature, domain) if err != nil { return err } @@ -116,11 +116,11 @@ func VerifyBlockSignatureUsingCurrentFork(beaconState state.ReadOnlyBeaconState, return signing.VerifyBlockSigningRoot(proposerPubKey, blk.Signature(), domain, blk.Block().HashTreeRoot) } -// BlockSignatureSet retrieves the block signature set from the provided block and its corresponding state. -func BlockSignatureSet(beaconState state.ReadOnlyBeaconState, +// BlockSignatureBatch retrieves the block signature batch from the provided block and its corresponding state. 
+func BlockSignatureBatch(beaconState state.ReadOnlyBeaconState, proposerIndex types.ValidatorIndex, sig []byte, - rootFunc func() ([32]byte, error)) (*bls.SignatureSet, error) { + rootFunc func() ([32]byte, error)) (*bls.SignatureBatch, error) { currentEpoch := slots.ToEpoch(beaconState.Slot()) domain, err := signing.Domain(beaconState.Fork(), currentEpoch, params.BeaconConfig().DomainBeaconProposer, beaconState.GenesisValidatorRoot()) if err != nil { @@ -131,21 +131,21 @@ func BlockSignatureSet(beaconState state.ReadOnlyBeaconState, return nil, err } proposerPubKey := proposer.PublicKey - return signing.BlockSignatureSet(proposerPubKey, sig, domain, rootFunc) + return signing.BlockSignatureBatch(proposerPubKey, sig, domain, rootFunc) } -// RandaoSignatureSet retrieves the relevant randao specific signature set object +// RandaoSignatureBatch retrieves the relevant randao specific signature batch object // from a block and its corresponding state. -func RandaoSignatureSet( +func RandaoSignatureBatch( ctx context.Context, beaconState state.ReadOnlyBeaconState, reveal []byte, -) (*bls.SignatureSet, error) { +) (*bls.SignatureBatch, error) { buf, proposerPub, domain, err := randaoSigningData(ctx, beaconState) if err != nil { return nil, err } - set, err := signatureSet(buf, proposerPub, reveal, domain) + set, err := signatureBatch(buf, proposerPub, reveal, domain) if err != nil { return nil, err } @@ -171,13 +171,13 @@ func randaoSigningData(ctx context.Context, beaconState state.ReadOnlyBeaconStat return buf, proposerPub[:], domain, nil } -// Method to break down attestations of the same domain and collect them into a single signature set. -func createAttestationSignatureSet( +// Method to break down attestations of the same domain and collect them into a single signature batch. 
+func createAttestationSignatureBatch( ctx context.Context, beaconState state.ReadOnlyBeaconState, atts []*ethpb.Attestation, domain []byte, -) (*bls.SignatureSet, error) { +) (*bls.SignatureBatch, error) { if len(atts) == 0 { return nil, nil } @@ -216,16 +216,16 @@ func createAttestationSignatureSet( } msgs[i] = root } - return &bls.SignatureSet{ + return &bls.SignatureBatch{ Signatures: sigs, PublicKeys: pks, Messages: msgs, }, nil } -// AttestationSignatureSet retrieves all the related attestation signature data such as the relevant public keys, -// signatures and attestation signing data and collate it into a signature set object. -func AttestationSignatureSet(ctx context.Context, beaconState state.ReadOnlyBeaconState, atts []*ethpb.Attestation) (*bls.SignatureSet, error) { +// AttestationSignatureBatch retrieves all the related attestation signature data such as the relevant public keys, +// signatures and attestation signing data and collate it into a signature batch object. +func AttestationSignatureBatch(ctx context.Context, beaconState state.ReadOnlyBeaconState, atts []*ethpb.Attestation) (*bls.SignatureBatch, error) { if len(atts) == 0 { return bls.NewSet(), nil } @@ -247,12 +247,12 @@ func AttestationSignatureSet(ctx context.Context, beaconState state.ReadOnlyBeac set := bls.NewSet() // Check attestations from before the fork. - if fork.Epoch > 0 && len(preForkAtts) > 0 { // Check to prevent underflow and there is valid attestations to create sig set. + if fork.Epoch > 0 && len(preForkAtts) > 0 { // Check to prevent underflow and there is valid attestations to create sig batch. 
prevDomain, err := signing.Domain(fork, fork.Epoch-1, dt, gvr) if err != nil { return nil, err } - aSet, err := createAttestationSignatureSet(ctx, beaconState, preForkAtts, prevDomain) + aSet, err := createAttestationSignatureBatch(ctx, beaconState, preForkAtts, prevDomain) if err != nil { return nil, err } @@ -272,7 +272,7 @@ func AttestationSignatureSet(ctx context.Context, beaconState state.ReadOnlyBeac return nil, err } - aSet, err := createAttestationSignatureSet(ctx, beaconState, postForkAtts, currDomain) + aSet, err := createAttestationSignatureBatch(ctx, beaconState, postForkAtts, currDomain) if err != nil { return nil, err } diff --git a/beacon-chain/core/signing/signing_root.go b/beacon-chain/core/signing/signing_root.go index b6e362a97f..ed95c91a9f 100644 --- a/beacon-chain/core/signing/signing_root.go +++ b/beacon-chain/core/signing/signing_root.go @@ -118,11 +118,11 @@ func VerifyBlockHeaderSigningRoot(blkHdr *ethpb.BeaconBlockHeader, pub, signatur // VerifyBlockSigningRoot verifies the signing root of a block given its public key, signature and domain. func VerifyBlockSigningRoot(pub, signature, domain []byte, rootFunc func() ([32]byte, error)) error { - set, err := BlockSignatureSet(pub, signature, domain, rootFunc) + set, err := BlockSignatureBatch(pub, signature, domain, rootFunc) if err != nil { return err } - // We assume only one signature set is returned here. + // We assume only one signature batch is returned here. sig := set.Signatures[0] publicKey := set.PublicKeys[0] root := set.Messages[0] @@ -137,9 +137,9 @@ func VerifyBlockSigningRoot(pub, signature, domain []byte, rootFunc func() ([32] return nil } -// BlockSignatureSet retrieves the relevant signature, message and pubkey data from a block and collating it -// into a signature set object. 
-func BlockSignatureSet(pub, signature, domain []byte, rootFunc func() ([32]byte, error)) (*bls.SignatureSet, error) { +// BlockSignatureBatch retrieves the relevant signature, message and pubkey data from a block and collating it +// into a signature batch object. +func BlockSignatureBatch(pub, signature, domain []byte, rootFunc func() ([32]byte, error)) (*bls.SignatureBatch, error) { publicKey, err := bls.PublicKeyFromBytes(pub) if err != nil { return nil, errors.Wrap(err, "could not convert bytes to public key") @@ -149,7 +149,7 @@ func BlockSignatureSet(pub, signature, domain []byte, rootFunc func() ([32]byte, if err != nil { return nil, errors.Wrap(err, "could not compute signing root") } - return &bls.SignatureSet{ + return &bls.SignatureBatch{ Signatures: [][]byte{signature}, PublicKeys: []bls.PublicKey{publicKey}, Messages: [][32]byte{root}, diff --git a/beacon-chain/core/transition/transition_no_verify_sig.go b/beacon-chain/core/transition/transition_no_verify_sig.go index d1e9f7a68b..19dd33933e 100644 --- a/beacon-chain/core/transition/transition_no_verify_sig.go +++ b/beacon-chain/core/transition/transition_no_verify_sig.go @@ -45,7 +45,7 @@ func ExecuteStateTransitionNoVerifyAnySig( ctx context.Context, state state.BeaconState, signed block.SignedBeaconBlock, -) (*bls.SignatureSet, state.BeaconState, error) { +) (*bls.SignatureBatch, state.BeaconState, error) { if ctx.Err() != nil { return nil, nil, ctx.Err() } @@ -182,7 +182,7 @@ func ProcessBlockNoVerifyAnySig( ctx context.Context, state state.BeaconState, signed block.SignedBeaconBlock, -) (*bls.SignatureSet, state.BeaconState, error) { +) (*bls.SignatureBatch, state.BeaconState, error) { ctx, span := trace.StartSpan(ctx, "core.state.ProcessBlockNoVerifyAnySig") defer span.End() if err := helpers.BeaconBlockIsNil(signed); err != nil { @@ -209,17 +209,17 @@ func ProcessBlockNoVerifyAnySig( } } - bSet, err := b.BlockSignatureSet(state, blk.ProposerIndex(), signed.Signature(), blk.HashTreeRoot) + bSet, 
err := b.BlockSignatureBatch(state, blk.ProposerIndex(), signed.Signature(), blk.HashTreeRoot) if err != nil { tracing.AnnotateError(span, err) return nil, nil, errors.Wrap(err, "could not retrieve block signature set") } - rSet, err := b.RandaoSignatureSet(ctx, state, signed.Block().Body().RandaoReveal()) + rSet, err := b.RandaoSignatureBatch(ctx, state, signed.Block().Body().RandaoReveal()) if err != nil { tracing.AnnotateError(span, err) return nil, nil, errors.Wrap(err, "could not retrieve randao signature set") } - aSet, err := b.AttestationSignatureSet(ctx, state, signed.Block().Body().Attestations()) + aSet, err := b.AttestationSignatureBatch(ctx, state, signed.Block().Body().Attestations()) if err != nil { return nil, nil, errors.Wrap(err, "could not retrieve attestation signature set") } diff --git a/beacon-chain/sync/batch_verifier.go b/beacon-chain/sync/batch_verifier.go index 7c412cc406..21adebd550 100644 --- a/beacon-chain/sync/batch_verifier.go +++ b/beacon-chain/sync/batch_verifier.go @@ -16,7 +16,7 @@ const signatureVerificationInterval = 50 * time.Millisecond const verifierLimit = 50 type signatureVerifier struct { - set *bls.SignatureSet + set *bls.SignatureBatch resChan chan error } @@ -49,7 +49,7 @@ func (s *Service) verifierRoutine() { } } -func (s *Service) validateWithBatchVerifier(ctx context.Context, message string, set *bls.SignatureSet) (pubsub.ValidationResult, error) { +func (s *Service) validateWithBatchVerifier(ctx context.Context, message string, set *bls.SignatureBatch) (pubsub.ValidationResult, error) { ctx, span := trace.StartSpan(ctx, "sync.validateWithBatchVerifier") defer span.End() diff --git a/beacon-chain/sync/batch_verifier_test.go b/beacon-chain/sync/batch_verifier_test.go index bb4a04d17d..8ce776ad75 100644 --- a/beacon-chain/sync/batch_verifier_test.go +++ b/beacon-chain/sync/batch_verifier_test.go @@ -15,12 +15,12 @@ func TestValidateWithBatchVerifier(t *testing.T) { assert.NoError(t, err) sig := 
keys[0].Sign(make([]byte, 32)) badSig := keys[1].Sign(make([]byte, 32)) - validSet := &bls.SignatureSet{ + validSet := &bls.SignatureBatch{ Messages: [][32]byte{{}}, PublicKeys: []bls.PublicKey{keys[0].PublicKey()}, Signatures: [][]byte{sig.Marshal()}, } - invalidSet := &bls.SignatureSet{ + invalidSet := &bls.SignatureBatch{ Messages: [][32]byte{{}}, PublicKeys: []bls.PublicKey{keys[0].PublicKey()}, Signatures: [][]byte{badSig.Marshal()}, @@ -28,8 +28,8 @@ func TestValidateWithBatchVerifier(t *testing.T) { tests := []struct { name string message string - set *bls.SignatureSet - preFilledSets []*bls.SignatureSet + set *bls.SignatureBatch + preFilledSets []*bls.SignatureBatch want pubsub.ValidationResult }{ { @@ -48,14 +48,14 @@ func TestValidateWithBatchVerifier(t *testing.T) { name: "invalid set in routine with valid set", message: "random", set: validSet, - preFilledSets: []*bls.SignatureSet{invalidSet}, + preFilledSets: []*bls.SignatureBatch{invalidSet}, want: pubsub.ValidationAccept, }, { name: "valid set in routine with invalid set", message: "random", set: invalidSet, - preFilledSets: []*bls.SignatureSet{validSet}, + preFilledSets: []*bls.SignatureBatch{validSet}, want: pubsub.ValidationReject, }, } diff --git a/beacon-chain/sync/validate_aggregate_proof.go b/beacon-chain/sync/validate_aggregate_proof.go index 886c3a5b49..fdb5d2546b 100644 --- a/beacon-chain/sync/validate_aggregate_proof.go +++ b/beacon-chain/sync/validate_aggregate_proof.go @@ -171,7 +171,7 @@ func (s *Service) validateAggregatedAtt(ctx context.Context, signed *ethpb.Signe tracing.AnnotateError(span, wrappedErr) return pubsub.ValidationIgnore, wrappedErr } - attSigSet, err := blocks.AttestationSignatureSet(ctx, bs, []*ethpb.Attestation{signed.Message.Aggregate}) + attSigSet, err := blocks.AttestationSignatureBatch(ctx, bs, []*ethpb.Attestation{signed.Message.Aggregate}) if err != nil { wrappedErr := errors.Wrapf(err, "Could not verify aggregator signature %d", signed.Message.AggregatorIndex) 
tracing.AnnotateError(span, wrappedErr) @@ -256,7 +256,7 @@ func validateSelectionIndex( data *ethpb.AttestationData, validatorIndex types.ValidatorIndex, proof []byte, -) (*bls.SignatureSet, error) { +) (*bls.SignatureBatch, error) { _, span := trace.StartSpan(ctx, "sync.validateSelectionIndex") defer span.End() @@ -293,7 +293,7 @@ func validateSelectionIndex( if err != nil { return nil, err } - return &bls.SignatureSet{ + return &bls.SignatureBatch{ Signatures: [][]byte{proof}, PublicKeys: []bls.PublicKey{publicKey}, Messages: [][32]byte{root}, @@ -301,7 +301,7 @@ func validateSelectionIndex( } // This returns aggregator signature set which can be used to batch verify. -func aggSigSet(s state.ReadOnlyBeaconState, a *ethpb.SignedAggregateAttestationAndProof) (*bls.SignatureSet, error) { +func aggSigSet(s state.ReadOnlyBeaconState, a *ethpb.SignedAggregateAttestationAndProof) (*bls.SignatureBatch, error) { v, err := s.ValidatorAtIndex(a.Message.AggregatorIndex) if err != nil { return nil, err @@ -320,7 +320,7 @@ func aggSigSet(s state.ReadOnlyBeaconState, a *ethpb.SignedAggregateAttestationA if err != nil { return nil, err } - return &bls.SignatureSet{ + return &bls.SignatureBatch{ Signatures: [][]byte{a.Signature}, PublicKeys: []bls.PublicKey{publicKey}, Messages: [][32]byte{root}, diff --git a/beacon-chain/sync/validate_beacon_attestation.go b/beacon-chain/sync/validate_beacon_attestation.go index f49b308ffb..a33f267230 100644 --- a/beacon-chain/sync/validate_beacon_attestation.go +++ b/beacon-chain/sync/validate_beacon_attestation.go @@ -219,7 +219,7 @@ func (s *Service) validateUnaggregatedAttWithState(ctx context.Context, a *eth.A } if features.Get().EnableBatchVerification { - set, err := blocks.AttestationSignatureSet(ctx, bs, []*eth.Attestation{a}) + set, err := blocks.AttestationSignatureBatch(ctx, bs, []*eth.Attestation{a}) if err != nil { tracing.AnnotateError(span, err) return pubsub.ValidationReject, err diff --git 
a/beacon-chain/sync/validate_sync_committee_message.go b/beacon-chain/sync/validate_sync_committee_message.go index 5b111c6e67..aac66bf3f0 100644 --- a/beacon-chain/sync/validate_sync_committee_message.go +++ b/beacon-chain/sync/validate_sync_committee_message.go @@ -243,7 +243,7 @@ func (s *Service) rejectInvalidSyncCommitteeSignature(m *ethpb.SyncCommitteeMess // the signature to a G2 point if batch verification is // enabled. if features.Get().EnableBatchVerification { - set := &bls.SignatureSet{ + set := &bls.SignatureBatch{ Messages: [][32]byte{sigRoot}, PublicKeys: []bls.PublicKey{pKey}, Signatures: [][]byte{m.Signature}, diff --git a/beacon-chain/sync/validate_sync_contribution_proof.go b/beacon-chain/sync/validate_sync_contribution_proof.go index e1a1c38863..c553f04c28 100644 --- a/beacon-chain/sync/validate_sync_contribution_proof.go +++ b/beacon-chain/sync/validate_sync_contribution_proof.go @@ -224,7 +224,7 @@ func (s *Service) rejectInvalidContributionSignature(m *ethpb.SignedContribution tracing.AnnotateError(span, err) return pubsub.ValidationReject, err } - set := &bls.SignatureSet{ + set := &bls.SignatureBatch{ Messages: [][32]byte{root}, PublicKeys: []bls.PublicKey{publicKey}, Signatures: [][]byte{m.Signature}, @@ -288,7 +288,7 @@ func (s *Service) rejectInvalidSyncAggregateSignature(m *ethpb.SignedContributio tracing.AnnotateError(span, err) return pubsub.ValidationIgnore, err } - set := &bls.SignatureSet{ + set := &bls.SignatureBatch{ Messages: [][32]byte{sigRoot}, PublicKeys: []bls.PublicKey{aggKey}, Signatures: [][]byte{m.Message.Contribution.Signature}, @@ -349,7 +349,7 @@ func (s *Service) verifySyncSelectionData(ctx context.Context, m *ethpb.Contribu if err != nil { return err } - set := &bls.SignatureSet{ + set := &bls.SignatureBatch{ Messages: [][32]byte{root}, PublicKeys: []bls.PublicKey{publicKey}, Signatures: [][]byte{m.SelectionProof}, diff --git a/crypto/bls/BUILD.bazel b/crypto/bls/BUILD.bazel index e918793e1f..0dec737726 100644 --- 
a/crypto/bls/BUILD.bazel +++ b/crypto/bls/BUILD.bazel @@ -7,7 +7,7 @@ go_library( "constants.go", "error.go", "interface.go", - "signature_set.go", + "signature_batch.go", ], importpath = "github.com/prysmaticlabs/prysm/crypto/bls", visibility = ["//visibility:public"], @@ -22,7 +22,7 @@ go_test( name = "go_default_test", srcs = [ "bls_test.go", - "signature_set_test.go", + "signature_batch_test.go", ], embed = [":go_default_library"], deps = [ diff --git a/crypto/bls/signature_set.go b/crypto/bls/signature_batch.go similarity index 64% rename from crypto/bls/signature_set.go rename to crypto/bls/signature_batch.go index 89d4c8d620..8c2975cea7 100644 --- a/crypto/bls/signature_set.go +++ b/crypto/bls/signature_batch.go @@ -1,39 +1,39 @@ package bls -// SignatureSet refers to the defined set of +// SignatureBatch refers to the defined set of // signatures and its respective public keys and // messages required to verify it. -type SignatureSet struct { +type SignatureBatch struct { Signatures [][]byte PublicKeys []PublicKey Messages [][32]byte } -// NewSet constructs an empty signature set object. -func NewSet() *SignatureSet { - return &SignatureSet{ +// NewSet constructs an empty signature batch object. +func NewSet() *SignatureBatch { + return &SignatureBatch{ Signatures: [][]byte{}, PublicKeys: []PublicKey{}, Messages: [][32]byte{}, } } -// Join merges the provided signature set to out current one. -func (s *SignatureSet) Join(set *SignatureSet) *SignatureSet { +// Join merges the provided signature batch to out current one. +func (s *SignatureBatch) Join(set *SignatureBatch) *SignatureBatch { s.Signatures = append(s.Signatures, set.Signatures...) s.PublicKeys = append(s.PublicKeys, set.PublicKeys...) s.Messages = append(s.Messages, set.Messages...) return s } -// Verify the current signature set using the batch verify algorithm. -func (s *SignatureSet) Verify() (bool, error) { +// Verify the current signature batch using the batch verify algorithm. 
+func (s *SignatureBatch) Verify() (bool, error) { return VerifyMultipleSignatures(s.Signatures, s.Messages, s.PublicKeys) } -// Copy the attached signature set and return it +// Copy the attached signature batch and return it // to the caller. -func (s *SignatureSet) Copy() *SignatureSet { +func (s *SignatureBatch) Copy() *SignatureBatch { signatures := make([][]byte, len(s.Signatures)) pubkeys := make([]PublicKey, len(s.PublicKeys)) messages := make([][32]byte, len(s.Messages)) @@ -48,7 +48,7 @@ func (s *SignatureSet) Copy() *SignatureSet { for i := range s.Messages { copy(messages[i][:], s.Messages[i][:]) } - return &SignatureSet{ + return &SignatureBatch{ Signatures: signatures, PublicKeys: pubkeys, Messages: messages, diff --git a/crypto/bls/signature_set_test.go b/crypto/bls/signature_batch_test.go similarity index 92% rename from crypto/bls/signature_set_test.go rename to crypto/bls/signature_batch_test.go index ea5b198ac5..207c2c1d16 100644 --- a/crypto/bls/signature_set_test.go +++ b/crypto/bls/signature_batch_test.go @@ -23,17 +23,17 @@ func TestCopySignatureSet(t *testing.T) { sig2 := key2.Sign(message2[:]) sig3 := key3.Sign(message3[:]) - set := &SignatureSet{ + set := &SignatureBatch{ Signatures: [][]byte{sig.Marshal()}, PublicKeys: []PublicKey{key.PublicKey()}, Messages: [][32]byte{message}, } - set2 := &SignatureSet{ + set2 := &SignatureBatch{ Signatures: [][]byte{sig2.Marshal()}, PublicKeys: []PublicKey{key.PublicKey()}, Messages: [][32]byte{message}, } - set3 := &SignatureSet{ + set3 := &SignatureBatch{ Signatures: [][]byte{sig3.Marshal()}, PublicKeys: []PublicKey{key.PublicKey()}, Messages: [][32]byte{message}, diff --git a/testing/spectest/shared/altair/operations/attestation.go b/testing/spectest/shared/altair/operations/attestation.go index 46492cb9a4..efa9e702b8 100644 --- a/testing/spectest/shared/altair/operations/attestation.go +++ b/testing/spectest/shared/altair/operations/attestation.go @@ -36,7 +36,7 @@ func RunAttestationTest(t 
*testing.T, config string) { if err != nil { return nil, err } - aSet, err := b.AttestationSignatureSet(ctx, st, blk.Block().Body().Attestations()) + aSet, err := b.AttestationSignatureBatch(ctx, st, blk.Block().Body().Attestations()) if err != nil { return nil, err } diff --git a/testing/spectest/shared/phase0/operations/attestation.go b/testing/spectest/shared/phase0/operations/attestation.go index 532acd95f9..7417353480 100644 --- a/testing/spectest/shared/phase0/operations/attestation.go +++ b/testing/spectest/shared/phase0/operations/attestation.go @@ -36,7 +36,7 @@ func RunAttestationTest(t *testing.T, config string) { if err != nil { return nil, err } - aSet, err := b.AttestationSignatureSet(ctx, st, blk.Block().Body().Attestations()) + aSet, err := b.AttestationSignatureBatch(ctx, st, blk.Block().Body().Attestations()) if err != nil { return nil, err } From cd1e3f2b3e2a771aed5e994b1d81f0ff7ff71835 Mon Sep 17 00:00:00 2001 From: terence tsao Date: Tue, 23 Nov 2021 09:49:06 -0800 Subject: [PATCH 17/45] Rename coinbase to fee recipient (#9918) * Rename coinbase to fee recipient * Fix imports * Update field name * Fee receipient * Fix goimports Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com> --- beacon-chain/node/config.go | 4 +- beacon-chain/node/config_test.go | 6 +- beacon-chain/rpc/eth/beacon/config_test.go | 6 +- cmd/beacon-chain/flags/base.go | 6 +- cmd/beacon-chain/main.go | 2 +- cmd/beacon-chain/usage.go | 2 +- config/params/config.go | 2 +- proto/prysm/v1alpha1/beacon_block.pb.go | 94 +++++++++++----------- proto/prysm/v1alpha1/beacon_block.proto | 2 +- proto/prysm/v1alpha1/beacon_state.pb.go | 93 ++++++++++----------- proto/prysm/v1alpha1/beacon_state.proto | 2 +- proto/prysm/v1alpha1/cloners.go | 2 +- proto/prysm/v1alpha1/cloners_test.go | 2 +- 13 files changed, 112 insertions(+), 111 deletions(-) diff --git a/beacon-chain/node/config.go b/beacon-chain/node/config.go index 8af802363f..8b76bef321 
100644 --- a/beacon-chain/node/config.go +++ b/beacon-chain/node/config.go @@ -113,9 +113,9 @@ func configureExecutionSetting(cliCtx *cli.Context) { c.TerminalBlockHashActivationEpoch = types.Epoch(cliCtx.Uint64(flags.TerminalBlockHashActivationEpochOverride.Name)) params.OverrideBeaconConfig(c) } - if cliCtx.IsSet(flags.Coinbase.Name) { + if cliCtx.IsSet(flags.FeeRecipient.Name) { c := params.BeaconConfig() - c.Coinbase = common.HexToAddress(cliCtx.String(flags.Coinbase.Name)) + c.FeeRecipient = common.HexToAddress(cliCtx.String(flags.FeeRecipient.Name)) params.OverrideBeaconConfig(c) } } diff --git a/beacon-chain/node/config_test.go b/beacon-chain/node/config_test.go index 165a883a84..fa71843bb9 100644 --- a/beacon-chain/node/config_test.go +++ b/beacon-chain/node/config_test.go @@ -79,11 +79,11 @@ func TestConfigureExecutionSetting(t *testing.T) { set.Uint64(flags.TerminalTotalDifficultyOverride.Name, 0, "") set.String(flags.TerminalBlockHashOverride.Name, "", "") set.Uint64(flags.TerminalBlockHashActivationEpochOverride.Name, 0, "") - set.String(flags.Coinbase.Name, "", "") + set.String(flags.FeeRecipient.Name, "", "") require.NoError(t, set.Set(flags.TerminalTotalDifficultyOverride.Name, strconv.Itoa(100))) require.NoError(t, set.Set(flags.TerminalBlockHashOverride.Name, "0xA")) require.NoError(t, set.Set(flags.TerminalBlockHashActivationEpochOverride.Name, strconv.Itoa(200))) - require.NoError(t, set.Set(flags.Coinbase.Name, "0xB")) + require.NoError(t, set.Set(flags.FeeRecipient.Name, "0xB")) cliCtx := cli.NewContext(&app, set, nil) configureExecutionSetting(cliCtx) @@ -91,7 +91,7 @@ func TestConfigureExecutionSetting(t *testing.T) { assert.Equal(t, uint64(100), params.BeaconConfig().TerminalTotalDifficulty) assert.Equal(t, common.HexToHash("0xA"), params.BeaconConfig().TerminalBlockHash) assert.Equal(t, types.Epoch(200), params.BeaconConfig().TerminalBlockHashActivationEpoch) - assert.Equal(t, common.HexToAddress("0xB"), params.BeaconConfig().Coinbase) + 
assert.Equal(t, common.HexToAddress("0xB"), params.BeaconConfig().FeeRecipient) } func TestConfigureNetwork(t *testing.T) { diff --git a/beacon-chain/rpc/eth/beacon/config_test.go b/beacon-chain/rpc/eth/beacon/config_test.go index 95760bb679..210a08557b 100644 --- a/beacon-chain/rpc/eth/beacon/config_test.go +++ b/beacon-chain/rpc/eth/beacon/config_test.go @@ -100,7 +100,7 @@ func TestGetSpec(t *testing.T) { config.TerminalBlockHash = common.HexToHash("TerminalBlockHash") config.TerminalBlockHashActivationEpoch = 72 config.TerminalTotalDifficulty = 73 - config.Coinbase = common.HexToAddress("Coinbase") + config.FeeRecipient = common.HexToAddress("FeeRecipient") var dbp [4]byte copy(dbp[:], []byte{'0', '0', '0', '1'}) @@ -329,8 +329,8 @@ func TestGetSpec(t *testing.T) { assert.Equal(t, common.HexToHash("TerminalBlockHash"), common.HexToHash(v)) case "TERMINAL_TOTAL_DIFFICULTY": assert.Equal(t, "73", v) - case "COINBASE": - assert.Equal(t, common.HexToAddress("Coinbase"), v) + case "FeeRecipient": + assert.Equal(t, common.HexToAddress("FeeRecipient"), v) default: t.Errorf("Incorrect key: %s", k) } diff --git a/cmd/beacon-chain/flags/base.go b/cmd/beacon-chain/flags/base.go index 58ec24b1aa..dce19df517 100644 --- a/cmd/beacon-chain/flags/base.go +++ b/cmd/beacon-chain/flags/base.go @@ -211,9 +211,9 @@ var ( "WARNING: This flag should be used only if you have a clear understanding that community has decided to override the terminal block hash activation epoch. " + "Incorrect usage will result in your node experience consensus failure.", } - // Coinbase specifies the fee recipient for the transaction fees. - Coinbase = &cli.StringFlag{ - Name: "coinbase", + // FeeRecipient specifies the fee recipient for the transaction fees. + FeeRecipient = &cli.StringFlag{ + Name: "fee-recipient", Usage: "Post merge, this address will receive the transaction fees produced by any blocks from this node. 
Default to junk whilst merge is in development state.", Value: hex.EncodeToString([]byte("0x0000000000000000000000000000000000000001")), } diff --git a/cmd/beacon-chain/main.go b/cmd/beacon-chain/main.go index fe292a699b..d299058c93 100644 --- a/cmd/beacon-chain/main.go +++ b/cmd/beacon-chain/main.go @@ -68,7 +68,7 @@ var appFlags = []cli.Flag{ flags.TerminalTotalDifficultyOverride, flags.TerminalBlockHashOverride, flags.TerminalBlockHashActivationEpochOverride, - flags.Coinbase, + flags.FeeRecipient, cmd.EnableBackupWebhookFlag, cmd.BackupWebhookOutputDir, cmd.MinimalConfigFlag, diff --git a/cmd/beacon-chain/usage.go b/cmd/beacon-chain/usage.go index 166b126840..101358fffb 100644 --- a/cmd/beacon-chain/usage.go +++ b/cmd/beacon-chain/usage.go @@ -130,7 +130,7 @@ var appHelpFlagGroups = []flagGroup{ flags.TerminalTotalDifficultyOverride, flags.TerminalBlockHashOverride, flags.TerminalBlockHashActivationEpochOverride, - flags.Coinbase, + flags.FeeRecipient, }, }, { diff --git a/config/params/config.go b/config/params/config.go index a99ac50dbc..4cffc21e4d 100644 --- a/config/params/config.go +++ b/config/params/config.go @@ -182,7 +182,7 @@ type BeaconChainConfig struct { TerminalBlockHash common.Hash `yaml:"TERMINAL_BLOCK_HASH" spec:"true"` // TerminalBlockHash of beacon chain. TerminalBlockHashActivationEpoch types.Epoch `yaml:"TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH" spec:"true"` // TerminalBlockHashActivationEpoch of beacon chain. TerminalTotalDifficulty uint64 `yaml:"TERMINAL_TOTAL_DIFFICULTY" spec:"true"` // TerminalTotalDifficulty is part of the experimental merge spec. This value is type is currently TBD: https://github.com/ethereum/consensus-specs/blob/dev/specs/merge/beacon-chain.md#transition-settings - Coinbase common.Address // Coinbase where the transaction fee goes to. + FeeRecipient common.Address // FeeRecipient where the transaction fee goes to. } // InitializeForkSchedule initializes the schedules forks baked into the config. 
diff --git a/proto/prysm/v1alpha1/beacon_block.pb.go b/proto/prysm/v1alpha1/beacon_block.pb.go index 19d38039d2..5ee9faa66a 100755 --- a/proto/prysm/v1alpha1/beacon_block.pb.go +++ b/proto/prysm/v1alpha1/beacon_block.pb.go @@ -1548,7 +1548,7 @@ type ExecutionPayload struct { unknownFields protoimpl.UnknownFields ParentHash []byte `protobuf:"bytes,1,opt,name=parent_hash,json=parentHash,proto3" json:"parent_hash,omitempty" ssz-size:"32"` - Coinbase []byte `protobuf:"bytes,2,opt,name=coinbase,proto3" json:"coinbase,omitempty" ssz-size:"20"` + FeeRecipient []byte `protobuf:"bytes,2,opt,name=fee_recipient,json=feeRecipient,proto3" json:"fee_recipient,omitempty" ssz-size:"20"` StateRoot []byte `protobuf:"bytes,3,opt,name=state_root,json=stateRoot,proto3" json:"state_root,omitempty" ssz-size:"32"` ReceiptRoot []byte `protobuf:"bytes,4,opt,name=receipt_root,json=receiptRoot,proto3" json:"receipt_root,omitempty" ssz-size:"32"` LogsBloom []byte `protobuf:"bytes,5,opt,name=logs_bloom,json=logsBloom,proto3" json:"logs_bloom,omitempty" ssz-size:"256"` @@ -1602,9 +1602,9 @@ func (x *ExecutionPayload) GetParentHash() []byte { return nil } -func (x *ExecutionPayload) GetCoinbase() []byte { +func (x *ExecutionPayload) GetFeeRecipient() []byte { if x != nil { - return x.Coinbase + return x.FeeRecipient } return nil } @@ -2133,53 +2133,53 @@ var file_proto_prysm_v1alpha1_beacon_block_proto_rawDesc = []byte{ 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x52, 0x10, 0x65, 0x78, 0x65, - 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0xb4, 0x04, + 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x22, 0xbd, 0x04, 0x0a, 0x10, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 
0x64, 0x12, 0x27, 0x0a, 0x0b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, - 0x0a, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, 0x12, 0x22, 0x0a, 0x08, 0x63, - 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, - 0xb5, 0x18, 0x02, 0x32, 0x30, 0x52, 0x08, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x12, - 0x25, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x03, 0x20, - 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x09, 0x73, 0x74, 0x61, - 0x74, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x29, 0x0a, 0x0c, 0x72, 0x65, 0x63, 0x65, 0x69, 0x70, - 0x74, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, - 0x18, 0x02, 0x33, 0x32, 0x52, 0x0b, 0x72, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x52, 0x6f, 0x6f, - 0x74, 0x12, 0x26, 0x0a, 0x0a, 0x6c, 0x6f, 0x67, 0x73, 0x5f, 0x62, 0x6c, 0x6f, 0x6f, 0x6d, 0x18, - 0x05, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x07, 0x8a, 0xb5, 0x18, 0x03, 0x32, 0x35, 0x36, 0x52, 0x09, - 0x6c, 0x6f, 0x67, 0x73, 0x42, 0x6c, 0x6f, 0x6f, 0x6d, 0x12, 0x1e, 0x0a, 0x06, 0x72, 0x61, 0x6e, - 0x64, 0x6f, 0x6d, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, - 0x32, 0x52, 0x06, 0x72, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, - 0x63, 0x6b, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, - 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, - 0x67, 0x61, 0x73, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, - 0x08, 0x67, 0x61, 0x73, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x67, 0x61, 0x73, - 0x5f, 0x75, 0x73, 0x65, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x67, 0x61, 0x73, - 0x55, 0x73, 0x65, 0x64, 0x12, 0x1c, 0x0a, 0x09, 
0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, - 0x70, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, - 0x6d, 0x70, 0x12, 0x25, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x72, 0x61, 0x5f, 0x64, 0x61, 0x74, 0x61, - 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x92, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x09, - 0x65, 0x78, 0x74, 0x72, 0x61, 0x44, 0x61, 0x74, 0x61, 0x12, 0x2f, 0x0a, 0x10, 0x62, 0x61, 0x73, - 0x65, 0x5f, 0x66, 0x65, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x67, 0x61, 0x73, 0x18, 0x0c, 0x20, - 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x0d, 0x62, 0x61, 0x73, - 0x65, 0x46, 0x65, 0x65, 0x50, 0x65, 0x72, 0x47, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x62, 0x6c, - 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, - 0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, - 0x68, 0x12, 0x41, 0x0a, 0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0c, 0x42, 0x1d, 0x8a, 0xb5, 0x18, 0x03, 0x3f, 0x2c, 0x3f, - 0x92, 0xb5, 0x18, 0x12, 0x31, 0x30, 0x34, 0x38, 0x35, 0x37, 0x36, 0x2c, 0x31, 0x30, 0x37, 0x33, - 0x37, 0x34, 0x31, 0x38, 0x32, 0x34, 0x52, 0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x98, 0x01, 0x0a, 0x19, 0x6f, 0x72, 0x67, 0x2e, 0x65, 0x74, 0x68, - 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, - 0x61, 0x31, 0x42, 0x10, 0x42, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x50, - 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x37, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, - 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, - 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x79, - 0x73, 0x6d, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x65, 0x74, 
0x68, 0xaa, - 0x02, 0x15, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x45, 0x74, 0x68, 0x2e, 0x76, - 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, 0x02, 0x15, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, - 0x75, 0x6d, 0x5c, 0x45, 0x74, 0x68, 0x5c, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x62, - 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x0a, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, 0x12, 0x2b, 0x0a, 0x0d, 0x66, + 0x65, 0x65, 0x5f, 0x72, 0x65, 0x63, 0x69, 0x70, 0x69, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, + 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x32, 0x30, 0x52, 0x0c, 0x66, 0x65, 0x65, 0x52, + 0x65, 0x63, 0x69, 0x70, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x25, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, + 0x65, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, + 0x18, 0x02, 0x33, 0x32, 0x52, 0x09, 0x73, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x12, + 0x29, 0x0a, 0x0c, 0x72, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, + 0x04, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x0b, 0x72, + 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x26, 0x0a, 0x0a, 0x6c, 0x6f, + 0x67, 0x73, 0x5f, 0x62, 0x6c, 0x6f, 0x6f, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x07, + 0x8a, 0xb5, 0x18, 0x03, 0x32, 0x35, 0x36, 0x52, 0x09, 0x6c, 0x6f, 0x67, 0x73, 0x42, 0x6c, 0x6f, + 0x6f, 0x6d, 0x12, 0x1e, 0x0a, 0x06, 0x72, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x18, 0x06, 0x20, 0x01, + 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x06, 0x72, 0x61, 0x6e, 0x64, + 0x6f, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x75, 0x6d, 0x62, + 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, + 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x67, 0x61, 0x73, 0x5f, 0x6c, 0x69, 0x6d, + 0x69, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x67, 0x61, 0x73, 
0x4c, 0x69, 0x6d, + 0x69, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x67, 0x61, 0x73, 0x5f, 0x75, 0x73, 0x65, 0x64, 0x18, 0x09, + 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x67, 0x61, 0x73, 0x55, 0x73, 0x65, 0x64, 0x12, 0x1c, 0x0a, + 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x04, + 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x25, 0x0a, 0x0a, 0x65, + 0x78, 0x74, 0x72, 0x61, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0c, 0x42, + 0x06, 0x92, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x09, 0x65, 0x78, 0x74, 0x72, 0x61, 0x44, 0x61, + 0x74, 0x61, 0x12, 0x2f, 0x0a, 0x10, 0x62, 0x61, 0x73, 0x65, 0x5f, 0x66, 0x65, 0x65, 0x5f, 0x70, + 0x65, 0x72, 0x5f, 0x67, 0x61, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, + 0x18, 0x02, 0x33, 0x32, 0x52, 0x0d, 0x62, 0x61, 0x73, 0x65, 0x46, 0x65, 0x65, 0x50, 0x65, 0x72, + 0x47, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, + 0x68, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, + 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x12, 0x41, 0x0a, 0x0c, 0x74, 0x72, + 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x0c, + 0x42, 0x1d, 0x8a, 0xb5, 0x18, 0x03, 0x3f, 0x2c, 0x3f, 0x92, 0xb5, 0x18, 0x12, 0x31, 0x30, 0x34, + 0x38, 0x35, 0x37, 0x36, 0x2c, 0x31, 0x30, 0x37, 0x33, 0x37, 0x34, 0x31, 0x38, 0x32, 0x34, 0x52, + 0x0c, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x98, 0x01, + 0x0a, 0x19, 0x6f, 0x72, 0x67, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, + 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x10, 0x42, 0x65, 0x61, + 0x63, 0x6f, 0x6e, 0x42, 0x6c, 0x6f, 0x63, 0x6b, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, + 0x37, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x79, 0x73, + 0x6d, 0x61, 0x74, 
0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x31, 0x61, 0x6c, + 0x70, 0x68, 0x61, 0x31, 0x3b, 0x65, 0x74, 0x68, 0xaa, 0x02, 0x15, 0x45, 0x74, 0x68, 0x65, 0x72, + 0x65, 0x75, 0x6d, 0x2e, 0x45, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, + 0xca, 0x02, 0x15, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x5c, 0x45, 0x74, 0x68, 0x5c, + 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/proto/prysm/v1alpha1/beacon_block.proto b/proto/prysm/v1alpha1/beacon_block.proto index 494542a6a7..511b9728f7 100644 --- a/proto/prysm/v1alpha1/beacon_block.proto +++ b/proto/prysm/v1alpha1/beacon_block.proto @@ -360,7 +360,7 @@ message BeaconBlockBodyMerge { message ExecutionPayload { bytes parent_hash = 1 [(ethereum.eth.ext.ssz_size) = "32"]; - bytes coinbase = 2 [(ethereum.eth.ext.ssz_size) = "20"]; // 'beneficiary' in the yellow paper + bytes fee_recipient = 2 [(ethereum.eth.ext.ssz_size) = "20"]; // 'beneficiary' in the yellow paper bytes state_root = 3 [(ethereum.eth.ext.ssz_size) = "32"]; bytes receipt_root = 4 [(ethereum.eth.ext.ssz_size) = "32"]; // 'receipts root' in the yellow paper bytes logs_bloom = 5 [(ethereum.eth.ext.ssz_size) = "256"]; diff --git a/proto/prysm/v1alpha1/beacon_state.pb.go b/proto/prysm/v1alpha1/beacon_state.pb.go index daf0209631..1248628294 100755 --- a/proto/prysm/v1alpha1/beacon_state.pb.go +++ b/proto/prysm/v1alpha1/beacon_state.pb.go @@ -1318,7 +1318,7 @@ type ExecutionPayloadHeader struct { unknownFields protoimpl.UnknownFields ParentHash []byte `protobuf:"bytes,1,opt,name=parent_hash,json=parentHash,proto3" json:"parent_hash,omitempty" ssz-size:"32"` - Coinbase []byte `protobuf:"bytes,2,opt,name=coinbase,proto3" json:"coinbase,omitempty" ssz-size:"20"` + FeeRecipient []byte `protobuf:"bytes,2,opt,name=fee_recipient,json=feeRecipient,proto3" 
json:"fee_recipient,omitempty" ssz-size:"20"` StateRoot []byte `protobuf:"bytes,3,opt,name=state_root,json=stateRoot,proto3" json:"state_root,omitempty" ssz-size:"32"` ReceiptRoot []byte `protobuf:"bytes,4,opt,name=receipt_root,json=receiptRoot,proto3" json:"receipt_root,omitempty" ssz-size:"32"` LogsBloom []byte `protobuf:"bytes,5,opt,name=logs_bloom,json=logsBloom,proto3" json:"logs_bloom,omitempty" ssz-size:"256"` @@ -1372,9 +1372,9 @@ func (x *ExecutionPayloadHeader) GetParentHash() []byte { return nil } -func (x *ExecutionPayloadHeader) GetCoinbase() []byte { +func (x *ExecutionPayloadHeader) GetFeeRecipient() []byte { if x != nil { - return x.Coinbase + return x.FeeRecipient } return nil } @@ -1895,52 +1895,53 @@ var file_proto_prysm_v1alpha1_beacon_state_proto_rawDesc = []byte{ 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x52, 0x1c, 0x6c, 0x61, 0x74, 0x65, 0x73, 0x74, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x79, - 0x6c, 0x6f, 0x61, 0x64, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x22, 0xac, 0x04, 0x0a, 0x16, 0x45, + 0x6c, 0x6f, 0x61, 0x64, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x22, 0xb5, 0x04, 0x0a, 0x16, 0x45, 0x78, 0x65, 0x63, 0x75, 0x74, 0x69, 0x6f, 0x6e, 0x50, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x48, 0x65, 0x61, 0x64, 0x65, 0x72, 0x12, 0x27, 0x0a, 0x0b, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, - 0x33, 0x32, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, 0x12, 0x22, - 0x0a, 0x08, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, 0x73, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, - 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x32, 0x30, 0x52, 0x08, 0x63, 0x6f, 0x69, 0x6e, 0x62, 0x61, - 0x73, 0x65, 0x12, 0x25, 0x0a, 0x0a, 0x73, 0x74, 0x61, 0x74, 0x65, 0x5f, 0x72, 0x6f, 0x6f, 0x74, - 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 
0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x09, - 0x73, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x29, 0x0a, 0x0c, 0x72, 0x65, 0x63, - 0x65, 0x69, 0x70, 0x74, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x42, - 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x0b, 0x72, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, - 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x26, 0x0a, 0x0a, 0x6c, 0x6f, 0x67, 0x73, 0x5f, 0x62, 0x6c, 0x6f, - 0x6f, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x07, 0x8a, 0xb5, 0x18, 0x03, 0x32, 0x35, - 0x36, 0x52, 0x09, 0x6c, 0x6f, 0x67, 0x73, 0x42, 0x6c, 0x6f, 0x6f, 0x6d, 0x12, 0x1e, 0x0a, 0x06, - 0x72, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, - 0x18, 0x02, 0x33, 0x32, 0x52, 0x06, 0x72, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x12, 0x21, 0x0a, 0x0c, - 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, - 0x28, 0x04, 0x52, 0x0b, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, - 0x1b, 0x0a, 0x09, 0x67, 0x61, 0x73, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x08, 0x20, 0x01, - 0x28, 0x04, 0x52, 0x08, 0x67, 0x61, 0x73, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x19, 0x0a, 0x08, - 0x67, 0x61, 0x73, 0x5f, 0x75, 0x73, 0x65, 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, - 0x67, 0x61, 0x73, 0x55, 0x73, 0x65, 0x64, 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, - 0x74, 0x61, 0x6d, 0x70, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, - 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x25, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x72, 0x61, 0x5f, 0x64, - 0x61, 0x74, 0x61, 0x18, 0x0b, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x92, 0xb5, 0x18, 0x02, 0x33, - 0x32, 0x52, 0x09, 0x65, 0x78, 0x74, 0x72, 0x61, 0x44, 0x61, 0x74, 0x61, 0x12, 0x2f, 0x0a, 0x10, - 0x62, 0x61, 0x73, 0x65, 0x5f, 0x66, 0x65, 0x65, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x67, 0x61, 0x73, - 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 
0x33, 0x32, 0x52, 0x0d, - 0x62, 0x61, 0x73, 0x65, 0x46, 0x65, 0x65, 0x50, 0x65, 0x72, 0x47, 0x61, 0x73, 0x12, 0x25, 0x0a, - 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x68, 0x61, 0x73, 0x68, 0x18, 0x0d, 0x20, 0x01, 0x28, - 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, - 0x48, 0x61, 0x73, 0x68, 0x12, 0x33, 0x0a, 0x11, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0c, 0x42, - 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x10, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, - 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x6f, 0x6f, 0x74, 0x42, 0x98, 0x01, 0x0a, 0x19, 0x6f, 0x72, - 0x67, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, - 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x42, 0x10, 0x42, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x53, - 0x74, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x37, 0x67, 0x69, 0x74, - 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x61, 0x74, 0x69, - 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, - 0x3b, 0x65, 0x74, 0x68, 0xaa, 0x02, 0x15, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, - 0x45, 0x74, 0x68, 0x2e, 0x56, 0x31, 0x41, 0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, 0x02, 0x15, 0x45, - 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x5c, 0x45, 0x74, 0x68, 0x5c, 0x76, 0x31, 0x61, 0x6c, - 0x70, 0x68, 0x61, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, + 0x33, 0x32, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x48, 0x61, 0x73, 0x68, 0x12, 0x2b, + 0x0a, 0x0d, 0x66, 0x65, 0x65, 0x5f, 0x72, 0x65, 0x63, 0x69, 0x70, 0x69, 0x65, 0x6e, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x32, 0x30, 0x52, 0x0c, 0x66, + 0x65, 0x65, 0x52, 0x65, 0x63, 0x69, 
0x70, 0x69, 0x65, 0x6e, 0x74, 0x12, 0x25, 0x0a, 0x0a, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x5f, 0x72, 0x6f, 0x6f, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0c, 0x42, + 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x09, 0x73, 0x74, 0x61, 0x74, 0x65, 0x52, 0x6f, + 0x6f, 0x74, 0x12, 0x29, 0x0a, 0x0c, 0x72, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x5f, 0x72, 0x6f, + 0x6f, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, + 0x52, 0x0b, 0x72, 0x65, 0x63, 0x65, 0x69, 0x70, 0x74, 0x52, 0x6f, 0x6f, 0x74, 0x12, 0x26, 0x0a, + 0x0a, 0x6c, 0x6f, 0x67, 0x73, 0x5f, 0x62, 0x6c, 0x6f, 0x6f, 0x6d, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0c, 0x42, 0x07, 0x8a, 0xb5, 0x18, 0x03, 0x32, 0x35, 0x36, 0x52, 0x09, 0x6c, 0x6f, 0x67, 0x73, + 0x42, 0x6c, 0x6f, 0x6f, 0x6d, 0x12, 0x1e, 0x0a, 0x06, 0x72, 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x18, + 0x06, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x06, 0x72, + 0x61, 0x6e, 0x64, 0x6f, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, 0x6e, + 0x75, 0x6d, 0x62, 0x65, 0x72, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x62, 0x6c, 0x6f, + 0x63, 0x6b, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x12, 0x1b, 0x0a, 0x09, 0x67, 0x61, 0x73, 0x5f, + 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x04, 0x52, 0x08, 0x67, 0x61, 0x73, + 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x12, 0x19, 0x0a, 0x08, 0x67, 0x61, 0x73, 0x5f, 0x75, 0x73, 0x65, + 0x64, 0x18, 0x09, 0x20, 0x01, 0x28, 0x04, 0x52, 0x07, 0x67, 0x61, 0x73, 0x55, 0x73, 0x65, 0x64, + 0x12, 0x1c, 0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x0a, 0x20, + 0x01, 0x28, 0x04, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x25, + 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x72, 0x61, 0x5f, 0x64, 0x61, 0x74, 0x61, 0x18, 0x0b, 0x20, 0x01, + 0x28, 0x0c, 0x42, 0x06, 0x92, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x09, 0x65, 0x78, 0x74, 0x72, + 0x61, 0x44, 0x61, 0x74, 0x61, 0x12, 0x2f, 0x0a, 0x10, 0x62, 0x61, 0x73, 
0x65, 0x5f, 0x66, 0x65, + 0x65, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x67, 0x61, 0x73, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0c, 0x42, + 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, 0x52, 0x0d, 0x62, 0x61, 0x73, 0x65, 0x46, 0x65, 0x65, + 0x50, 0x65, 0x72, 0x47, 0x61, 0x73, 0x12, 0x25, 0x0a, 0x0a, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x5f, + 0x68, 0x61, 0x73, 0x68, 0x18, 0x0d, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, + 0x33, 0x32, 0x52, 0x09, 0x62, 0x6c, 0x6f, 0x63, 0x6b, 0x48, 0x61, 0x73, 0x68, 0x12, 0x33, 0x0a, + 0x11, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x5f, 0x72, 0x6f, + 0x6f, 0x74, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0c, 0x42, 0x06, 0x8a, 0xb5, 0x18, 0x02, 0x33, 0x32, + 0x52, 0x10, 0x74, 0x72, 0x61, 0x6e, 0x73, 0x61, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x73, 0x52, 0x6f, + 0x6f, 0x74, 0x42, 0x98, 0x01, 0x0a, 0x19, 0x6f, 0x72, 0x67, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, + 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, + 0x42, 0x10, 0x42, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x65, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x37, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, + 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x70, + 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, + 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x3b, 0x65, 0x74, 0x68, 0xaa, 0x02, 0x15, + 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x45, 0x74, 0x68, 0x2e, 0x56, 0x31, 0x41, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0xca, 0x02, 0x15, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, + 0x5c, 0x45, 0x74, 0x68, 0x5c, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x62, 0x06, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( diff --git a/proto/prysm/v1alpha1/beacon_state.proto b/proto/prysm/v1alpha1/beacon_state.proto index 9b2d667623..5991afa7fd 100644 --- 
a/proto/prysm/v1alpha1/beacon_state.proto +++ b/proto/prysm/v1alpha1/beacon_state.proto @@ -237,7 +237,7 @@ message BeaconStateMerge { message ExecutionPayloadHeader { bytes parent_hash = 1 [(ethereum.eth.ext.ssz_size) = "32"]; - bytes coinbase = 2 [(ethereum.eth.ext.ssz_size) = "20"]; + bytes fee_recipient = 2 [(ethereum.eth.ext.ssz_size) = "20"]; bytes state_root = 3 [(ethereum.eth.ext.ssz_size) = "32"]; bytes receipt_root = 4 [(ethereum.eth.ext.ssz_size) = "32"]; bytes logs_bloom = 5 [(ethereum.eth.ext.ssz_size) = "256"]; diff --git a/proto/prysm/v1alpha1/cloners.go b/proto/prysm/v1alpha1/cloners.go index 2e8e4d7823..f6710e7bf1 100644 --- a/proto/prysm/v1alpha1/cloners.go +++ b/proto/prysm/v1alpha1/cloners.go @@ -384,7 +384,7 @@ func CopyExecutionPayloadHeader(payload *ExecutionPayloadHeader) *ExecutionPaylo } return &ExecutionPayloadHeader{ ParentHash: bytesutil.SafeCopyBytes(payload.ParentHash), - Coinbase: bytesutil.SafeCopyBytes(payload.Coinbase), + FeeRecipient: bytesutil.SafeCopyBytes(payload.FeeRecipient), StateRoot: bytesutil.SafeCopyBytes(payload.StateRoot), ReceiptRoot: bytesutil.SafeCopyBytes(payload.ReceiptRoot), LogsBloom: bytesutil.SafeCopyBytes(payload.LogsBloom), diff --git a/proto/prysm/v1alpha1/cloners_test.go b/proto/prysm/v1alpha1/cloners_test.go index 3fb20fd07b..6368036720 100644 --- a/proto/prysm/v1alpha1/cloners_test.go +++ b/proto/prysm/v1alpha1/cloners_test.go @@ -586,7 +586,7 @@ func genSyncCommitteeMessage() *v1alpha1.SyncCommitteeMessage { func genPayloadHeader() *v1alpha1.ExecutionPayloadHeader { return &v1alpha1.ExecutionPayloadHeader{ ParentHash: bytes(), - Coinbase: bytes(), + FeeRecipient: bytes(), StateRoot: bytes(), ReceiptRoot: bytes(), LogsBloom: bytes(), From 4858de787558c792b01aae44bc3902859b98fcac Mon Sep 17 00:00:00 2001 From: terence tsao Date: Tue, 23 Nov 2021 13:28:24 -0800 Subject: [PATCH 18/45] Use `prysmaticlabs/fastssz` (#9928) * Use prysmaticlabs/fastssz * Generated code --- deps.bzl | 11 +--- go.mod | 2 + go.sum 
| 5 +- proto/eth/v1/generated.ssz.go | 22 +++---- proto/eth/v2/generated.ssz.go | 22 +++---- proto/prysm/v1alpha1/generated.ssz.go | 92 +++++++++++++++------------ 6 files changed, 80 insertions(+), 74 deletions(-) diff --git a/deps.bzl b/deps.bzl index d265cb4b37..77f34a6684 100644 --- a/deps.bzl +++ b/deps.bzl @@ -839,8 +839,9 @@ def prysm_deps(): name = "com_github_ferranbt_fastssz", importpath = "github.com/ferranbt/fastssz", nofuzz = True, - sum = "h1:6dVcS0LktRSyEEgldFY4N9J17WjUoiJStttH+RZj0Wo=", - version = "v0.0.0-20210905181407-59cf6761a7d5", + replace = "github.com/prysmaticlabs/fastssz", + sum = "h1:BC9nIbhpQMyFlmLUJsVv8/+UewAVIjJegtvgaP9bV/M=", + version = "v0.0.0-20211123050228-97d96f38caae", ) go_repository( @@ -2685,12 +2686,6 @@ def prysm_deps(): version = "v1.0.1", ) - go_repository( - name = "com_github_nbutton23_zxcvbn_go", - importpath = "github.com/nbutton23/zxcvbn-go", - sum = "h1:AREM5mwr4u1ORQBMvzfzBgpsctsbQikCVpvC+tX285E=", - version = "v0.0.0-20180912185939-ae427f1e4c1d", - ) go_repository( name = "com_github_neelance_astrewrite", importpath = "github.com/neelance/astrewrite", diff --git a/go.mod b/go.mod index d078ee606a..7c8fa5dfaa 100644 --- a/go.mod +++ b/go.mod @@ -127,3 +127,5 @@ replace github.com/json-iterator/go => github.com/prestonvanloon/go v1.1.7-0.201 // See https://github.com/prysmaticlabs/grpc-gateway/issues/2 replace github.com/grpc-ecosystem/grpc-gateway/v2 => github.com/prysmaticlabs/grpc-gateway/v2 v2.3.1-0.20210702154020-550e1cd83ec1 + +replace github.com/ferranbt/fastssz => github.com/prysmaticlabs/fastssz v0.0.0-20211123050228-97d96f38caae diff --git a/go.sum b/go.sum index e55b54cb65..c825bf4173 100644 --- a/go.sum +++ b/go.sum @@ -273,9 +273,6 @@ github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLi github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= github.com/fatih/color 
v1.9.0/go.mod h1:eQcE1qtQxscV5RaZvpXrrb8Drkc3/DdQ+uUYCNjL+zU= -github.com/ferranbt/fastssz v0.0.0-20210120143747-11b9eff30ea9/go.mod h1:DyEu2iuLBnb/T51BlsiO3yLYdJC6UbGMrIkqK1KmQxM= -github.com/ferranbt/fastssz v0.0.0-20210905181407-59cf6761a7d5 h1:6dVcS0LktRSyEEgldFY4N9J17WjUoiJStttH+RZj0Wo= -github.com/ferranbt/fastssz v0.0.0-20210905181407-59cf6761a7d5/go.mod h1:S8yiDeAXy8f88W4Ul+0dBMPx49S05byYbmZD6Uv94K4= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c= github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0= github.com/flynn/go-shlex v0.0.0-20150515145356-3f9db97f8568/go.mod h1:xEzjJPgXI435gkrCt3MPfRiAkVrwSbHsst4LCFVfpJc= @@ -1193,6 +1190,8 @@ github.com/prometheus/tsdb v0.10.0 h1:If5rVCMTp6W2SiRAQFlbpJNgVlgMEd+U2GZckwK38i github.com/prometheus/tsdb v0.10.0/go.mod h1:oi49uRhEe9dPUTlS3JRZOwJuVi6tmh10QSgwXEyGCt4= github.com/prysmaticlabs/eth2-types v0.0.0-20210303084904-c9735a06829d h1:1dN7YAqMN3oAJ0LceWcyv/U4jHLh+5urnSnr4br6zg4= github.com/prysmaticlabs/eth2-types v0.0.0-20210303084904-c9735a06829d/go.mod h1:kOmQ/zdobQf7HUohDTifDNFEZfNaSCIY5fkONPL+dWU= +github.com/prysmaticlabs/fastssz v0.0.0-20211123050228-97d96f38caae h1:BC9nIbhpQMyFlmLUJsVv8/+UewAVIjJegtvgaP9bV/M= +github.com/prysmaticlabs/fastssz v0.0.0-20211123050228-97d96f38caae/go.mod h1:S8yiDeAXy8f88W4Ul+0dBMPx49S05byYbmZD6Uv94K4= github.com/prysmaticlabs/go-bitfield v0.0.0-20210108222456-8e92c3709aa0/go.mod h1:hCwmef+4qXWjv0jLDbQdWnL0Ol7cS7/lCSS26WR+u6s= github.com/prysmaticlabs/go-bitfield v0.0.0-20210809151128-385d8c5e3fb7 h1:0tVE4tdWQK9ZpYygoV7+vS6QkDvQVySboMVEIxBJmXw= github.com/prysmaticlabs/go-bitfield v0.0.0-20210809151128-385d8c5e3fb7/go.mod h1:wmuf/mdK4VMD+jA9ThwcUKjg3a2XWM9cVfFYjDyY4j4= diff --git a/proto/eth/v1/generated.ssz.go b/proto/eth/v1/generated.ssz.go index 2fb9c72e38..def2a10079 100644 --- a/proto/eth/v1/generated.ssz.go +++ b/proto/eth/v1/generated.ssz.go @@ -1,5 +1,5 @@ 
// Code generated by fastssz. DO NOT EDIT. -// Hash: 2ef8662cd6bab2d476ca79b4a918b5f0177316ea961f47983c0976eb415e46aa +// Hash: b3c7beb6fee67693e7cbc61184abd8465c51bc99f505ee2ff986d61f34ce9042 package v1 import ( @@ -1166,8 +1166,8 @@ func (b *BeaconBlockBody) HashTreeRootWith(hh *ssz.Hasher) (err error) { err = ssz.ErrIncorrectListSize return } - for i := uint64(0); i < num; i++ { - if err = b.ProposerSlashings[i].HashTreeRootWith(hh); err != nil { + for _, elem := range b.ProposerSlashings { + if err = elem.HashTreeRootWith(hh); err != nil { return } } @@ -1182,8 +1182,8 @@ func (b *BeaconBlockBody) HashTreeRootWith(hh *ssz.Hasher) (err error) { err = ssz.ErrIncorrectListSize return } - for i := uint64(0); i < num; i++ { - if err = b.AttesterSlashings[i].HashTreeRootWith(hh); err != nil { + for _, elem := range b.AttesterSlashings { + if err = elem.HashTreeRootWith(hh); err != nil { return } } @@ -1198,8 +1198,8 @@ func (b *BeaconBlockBody) HashTreeRootWith(hh *ssz.Hasher) (err error) { err = ssz.ErrIncorrectListSize return } - for i := uint64(0); i < num; i++ { - if err = b.Attestations[i].HashTreeRootWith(hh); err != nil { + for _, elem := range b.Attestations { + if err = elem.HashTreeRootWith(hh); err != nil { return } } @@ -1214,8 +1214,8 @@ func (b *BeaconBlockBody) HashTreeRootWith(hh *ssz.Hasher) (err error) { err = ssz.ErrIncorrectListSize return } - for i := uint64(0); i < num; i++ { - if err = b.Deposits[i].HashTreeRootWith(hh); err != nil { + for _, elem := range b.Deposits { + if err = elem.HashTreeRootWith(hh); err != nil { return } } @@ -1230,8 +1230,8 @@ func (b *BeaconBlockBody) HashTreeRootWith(hh *ssz.Hasher) (err error) { err = ssz.ErrIncorrectListSize return } - for i := uint64(0); i < num; i++ { - if err = b.VoluntaryExits[i].HashTreeRootWith(hh); err != nil { + for _, elem := range b.VoluntaryExits { + if err = elem.HashTreeRootWith(hh); err != nil { return } } diff --git a/proto/eth/v2/generated.ssz.go b/proto/eth/v2/generated.ssz.go index 
a6c38a2fc4..0d296a11eb 100644 --- a/proto/eth/v2/generated.ssz.go +++ b/proto/eth/v2/generated.ssz.go @@ -1,5 +1,5 @@ // Code generated by fastssz. DO NOT EDIT. -// Hash: c73c221c909430e49bd5b4bc72e72ff54596cc34e5b869bda04c4adfa5884321 +// Hash: 360f153586a98510952b3e58d20707a672f519aa0532f17e350dca2562c69784 package eth import ( @@ -634,8 +634,8 @@ func (b *BeaconBlockBodyAltair) HashTreeRootWith(hh *ssz.Hasher) (err error) { err = ssz.ErrIncorrectListSize return } - for i := uint64(0); i < num; i++ { - if err = b.ProposerSlashings[i].HashTreeRootWith(hh); err != nil { + for _, elem := range b.ProposerSlashings { + if err = elem.HashTreeRootWith(hh); err != nil { return } } @@ -650,8 +650,8 @@ func (b *BeaconBlockBodyAltair) HashTreeRootWith(hh *ssz.Hasher) (err error) { err = ssz.ErrIncorrectListSize return } - for i := uint64(0); i < num; i++ { - if err = b.AttesterSlashings[i].HashTreeRootWith(hh); err != nil { + for _, elem := range b.AttesterSlashings { + if err = elem.HashTreeRootWith(hh); err != nil { return } } @@ -666,8 +666,8 @@ func (b *BeaconBlockBodyAltair) HashTreeRootWith(hh *ssz.Hasher) (err error) { err = ssz.ErrIncorrectListSize return } - for i := uint64(0); i < num; i++ { - if err = b.Attestations[i].HashTreeRootWith(hh); err != nil { + for _, elem := range b.Attestations { + if err = elem.HashTreeRootWith(hh); err != nil { return } } @@ -682,8 +682,8 @@ func (b *BeaconBlockBodyAltair) HashTreeRootWith(hh *ssz.Hasher) (err error) { err = ssz.ErrIncorrectListSize return } - for i := uint64(0); i < num; i++ { - if err = b.Deposits[i].HashTreeRootWith(hh); err != nil { + for _, elem := range b.Deposits { + if err = elem.HashTreeRootWith(hh); err != nil { return } } @@ -698,8 +698,8 @@ func (b *BeaconBlockBodyAltair) HashTreeRootWith(hh *ssz.Hasher) (err error) { err = ssz.ErrIncorrectListSize return } - for i := uint64(0); i < num; i++ { - if err = b.VoluntaryExits[i].HashTreeRootWith(hh); err != nil { + for _, elem := range b.VoluntaryExits { + 
if err = elem.HashTreeRootWith(hh); err != nil { return } } diff --git a/proto/prysm/v1alpha1/generated.ssz.go b/proto/prysm/v1alpha1/generated.ssz.go index db78127830..e341d5694d 100644 --- a/proto/prysm/v1alpha1/generated.ssz.go +++ b/proto/prysm/v1alpha1/generated.ssz.go @@ -1,5 +1,5 @@ // Code generated by fastssz. DO NOT EDIT. -// Hash: 185d8107f2685f571ffa9c3a37f5481bd6c4861503030c71fad71950af93a6f7 +// Hash: 5007f7f4cc13cfbee54b516ee840fe0426c6a290b99a9f677c6008e98887ae2f package eth import ( @@ -1422,8 +1422,8 @@ func (b *BeaconBlockBody) HashTreeRootWith(hh *ssz.Hasher) (err error) { err = ssz.ErrIncorrectListSize return } - for i := uint64(0); i < num; i++ { - if err = b.ProposerSlashings[i].HashTreeRootWith(hh); err != nil { + for _, elem := range b.ProposerSlashings { + if err = elem.HashTreeRootWith(hh); err != nil { return } } @@ -1438,8 +1438,8 @@ func (b *BeaconBlockBody) HashTreeRootWith(hh *ssz.Hasher) (err error) { err = ssz.ErrIncorrectListSize return } - for i := uint64(0); i < num; i++ { - if err = b.AttesterSlashings[i].HashTreeRootWith(hh); err != nil { + for _, elem := range b.AttesterSlashings { + if err = elem.HashTreeRootWith(hh); err != nil { return } } @@ -1454,8 +1454,8 @@ func (b *BeaconBlockBody) HashTreeRootWith(hh *ssz.Hasher) (err error) { err = ssz.ErrIncorrectListSize return } - for i := uint64(0); i < num; i++ { - if err = b.Attestations[i].HashTreeRootWith(hh); err != nil { + for _, elem := range b.Attestations { + if err = elem.HashTreeRootWith(hh); err != nil { return } } @@ -1470,8 +1470,8 @@ func (b *BeaconBlockBody) HashTreeRootWith(hh *ssz.Hasher) (err error) { err = ssz.ErrIncorrectListSize return } - for i := uint64(0); i < num; i++ { - if err = b.Deposits[i].HashTreeRootWith(hh); err != nil { + for _, elem := range b.Deposits { + if err = elem.HashTreeRootWith(hh); err != nil { return } } @@ -1486,8 +1486,8 @@ func (b *BeaconBlockBody) HashTreeRootWith(hh *ssz.Hasher) (err error) { err = ssz.ErrIncorrectListSize 
return } - for i := uint64(0); i < num; i++ { - if err = b.VoluntaryExits[i].HashTreeRootWith(hh); err != nil { + for _, elem := range b.VoluntaryExits { + if err = elem.HashTreeRootWith(hh); err != nil { return } } @@ -1868,8 +1868,8 @@ func (b *BeaconBlockBodyAltair) HashTreeRootWith(hh *ssz.Hasher) (err error) { err = ssz.ErrIncorrectListSize return } - for i := uint64(0); i < num; i++ { - if err = b.ProposerSlashings[i].HashTreeRootWith(hh); err != nil { + for _, elem := range b.ProposerSlashings { + if err = elem.HashTreeRootWith(hh); err != nil { return } } @@ -1884,8 +1884,8 @@ func (b *BeaconBlockBodyAltair) HashTreeRootWith(hh *ssz.Hasher) (err error) { err = ssz.ErrIncorrectListSize return } - for i := uint64(0); i < num; i++ { - if err = b.AttesterSlashings[i].HashTreeRootWith(hh); err != nil { + for _, elem := range b.AttesterSlashings { + if err = elem.HashTreeRootWith(hh); err != nil { return } } @@ -1900,8 +1900,8 @@ func (b *BeaconBlockBodyAltair) HashTreeRootWith(hh *ssz.Hasher) (err error) { err = ssz.ErrIncorrectListSize return } - for i := uint64(0); i < num; i++ { - if err = b.Attestations[i].HashTreeRootWith(hh); err != nil { + for _, elem := range b.Attestations { + if err = elem.HashTreeRootWith(hh); err != nil { return } } @@ -1916,8 +1916,8 @@ func (b *BeaconBlockBodyAltair) HashTreeRootWith(hh *ssz.Hasher) (err error) { err = ssz.ErrIncorrectListSize return } - for i := uint64(0); i < num; i++ { - if err = b.Deposits[i].HashTreeRootWith(hh); err != nil { + for _, elem := range b.Deposits { + if err = elem.HashTreeRootWith(hh); err != nil { return } } @@ -1932,8 +1932,8 @@ func (b *BeaconBlockBodyAltair) HashTreeRootWith(hh *ssz.Hasher) (err error) { err = ssz.ErrIncorrectListSize return } - for i := uint64(0); i < num; i++ { - if err = b.VoluntaryExits[i].HashTreeRootWith(hh); err != nil { + for _, elem := range b.VoluntaryExits { + if err = elem.HashTreeRootWith(hh); err != nil { return } } @@ -3661,8 +3661,8 @@ func (b *BeaconState) 
HashTreeRootWith(hh *ssz.Hasher) (err error) { err = ssz.ErrIncorrectListSize return } - for i := uint64(0); i < num; i++ { - if err = b.Eth1DataVotes[i].HashTreeRootWith(hh); err != nil { + for _, elem := range b.Eth1DataVotes { + if err = elem.HashTreeRootWith(hh); err != nil { return } } @@ -3680,8 +3680,8 @@ func (b *BeaconState) HashTreeRootWith(hh *ssz.Hasher) (err error) { err = ssz.ErrIncorrectListSize return } - for i := uint64(0); i < num; i++ { - if err = b.Validators[i].HashTreeRootWith(hh); err != nil { + for _, elem := range b.Validators { + if err = elem.HashTreeRootWith(hh); err != nil { return } } @@ -3741,8 +3741,8 @@ func (b *BeaconState) HashTreeRootWith(hh *ssz.Hasher) (err error) { err = ssz.ErrIncorrectListSize return } - for i := uint64(0); i < num; i++ { - if err = b.PreviousEpochAttestations[i].HashTreeRootWith(hh); err != nil { + for _, elem := range b.PreviousEpochAttestations { + if err = elem.HashTreeRootWith(hh); err != nil { return } } @@ -3757,8 +3757,8 @@ func (b *BeaconState) HashTreeRootWith(hh *ssz.Hasher) (err error) { err = ssz.ErrIncorrectListSize return } - for i := uint64(0); i < num; i++ { - if err = b.CurrentEpochAttestations[i].HashTreeRootWith(hh); err != nil { + for _, elem := range b.CurrentEpochAttestations { + if err = elem.HashTreeRootWith(hh); err != nil { return } } @@ -4431,8 +4431,8 @@ func (b *BeaconStateAltair) HashTreeRootWith(hh *ssz.Hasher) (err error) { err = ssz.ErrIncorrectListSize return } - for i := uint64(0); i < num; i++ { - if err = b.Eth1DataVotes[i].HashTreeRootWith(hh); err != nil { + for _, elem := range b.Eth1DataVotes { + if err = elem.HashTreeRootWith(hh); err != nil { return } } @@ -4450,8 +4450,8 @@ func (b *BeaconStateAltair) HashTreeRootWith(hh *ssz.Hasher) (err error) { err = ssz.ErrIncorrectListSize return } - for i := uint64(0); i < num; i++ { - if err = b.Validators[i].HashTreeRootWith(hh); err != nil { + for _, elem := range b.Validators { + if err = elem.HashTreeRootWith(hh); err 
!= nil { return } } @@ -4504,18 +4504,28 @@ func (b *BeaconStateAltair) HashTreeRootWith(hh *ssz.Hasher) (err error) { } // Field (15) 'PreviousEpochParticipation' - if len(b.PreviousEpochParticipation) > 1099511627776 { - err = ssz.ErrBytesLength - return + { + elemIndx := hh.Index() + byteLen := uint64(len(b.PreviousEpochParticipation)) + if byteLen > 1099511627776 { + err = ssz.ErrIncorrectListSize + return + } + hh.PutBytes(b.PreviousEpochParticipation) + hh.MerkleizeWithMixin(elemIndx, byteLen, (1099511627776+31)/32) } - hh.PutBytes(b.PreviousEpochParticipation) // Field (16) 'CurrentEpochParticipation' - if len(b.CurrentEpochParticipation) > 1099511627776 { - err = ssz.ErrBytesLength - return + { + elemIndx := hh.Index() + byteLen := uint64(len(b.CurrentEpochParticipation)) + if byteLen > 1099511627776 { + err = ssz.ErrIncorrectListSize + return + } + hh.PutBytes(b.CurrentEpochParticipation) + hh.MerkleizeWithMixin(elemIndx, byteLen, (1099511627776+31)/32) } - hh.PutBytes(b.CurrentEpochParticipation) // Field (17) 'JustificationBits' if len(b.JustificationBits) != 1 { From 448d62d6e3e75e8aac9fcbed88d427aea1b51f55 Mon Sep 17 00:00:00 2001 From: terence tsao Date: Tue, 23 Nov 2021 15:34:31 -0800 Subject: [PATCH 19/45] Add merge beacon chain objects and generate ssz.go (#9929) --- proto/eth/v1/generated.ssz.go | 2 +- proto/prysm/v1alpha1/BUILD.bazel | 4 + proto/prysm/v1alpha1/generated.ssz.go | 2256 ++++++++++++++++++++++++- 3 files changed, 2260 insertions(+), 2 deletions(-) diff --git a/proto/eth/v1/generated.ssz.go b/proto/eth/v1/generated.ssz.go index def2a10079..d32611ebae 100644 --- a/proto/eth/v1/generated.ssz.go +++ b/proto/eth/v1/generated.ssz.go @@ -1,5 +1,5 @@ // Code generated by fastssz. DO NOT EDIT. 
-// Hash: b3c7beb6fee67693e7cbc61184abd8465c51bc99f505ee2ff986d61f34ce9042 +// Hash: a4b9bf0c477f52b0980da66582aa23b181985388419d9551e8e256729d71f1d0 package v1 import ( diff --git a/proto/prysm/v1alpha1/BUILD.bazel b/proto/prysm/v1alpha1/BUILD.bazel index e25e4c56fd..a9aa86a030 100644 --- a/proto/prysm/v1alpha1/BUILD.bazel +++ b/proto/prysm/v1alpha1/BUILD.bazel @@ -58,6 +58,9 @@ ssz_gen_marshal( "BeaconBlockAltair", "BeaconBlockBodyAltair", "SignedBeaconBlockAltair", + "BeaconBlockMerge", + "BeaconBlockBodyMerge", + "SignedBeaconBlockMerge", "SyncAggregate", "SyncCommitteeMessage", "SyncCommitteeContribution", @@ -94,6 +97,7 @@ ssz_gen_marshal( "Status", "BeaconState", "BeaconStateAltair", + "BeaconStateMerge", "SigningData", "SyncCommittee", "SyncAggregatorSelectionData", diff --git a/proto/prysm/v1alpha1/generated.ssz.go b/proto/prysm/v1alpha1/generated.ssz.go index e341d5694d..9f5863f149 100644 --- a/proto/prysm/v1alpha1/generated.ssz.go +++ b/proto/prysm/v1alpha1/generated.ssz.go @@ -1,5 +1,5 @@ // Code generated by fastssz. DO NOT EDIT. -// Hash: 5007f7f4cc13cfbee54b516ee840fe0426c6a290b99a9f677c6008e98887ae2f +// Hash: 036fa88e7ecacbc13431c0b240e9aca847ce683c0568236674aebfd928cb3a63 package eth import ( @@ -2922,6 +2922,1125 @@ func (s *SyncAggregate) HashTreeRootWith(hh *ssz.Hasher) (err error) { return } +// MarshalSSZ ssz marshals the SignedBeaconBlockMerge object +func (s *SignedBeaconBlockMerge) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(s) +} + +// MarshalSSZTo ssz marshals the SignedBeaconBlockMerge object to a target array +func (s *SignedBeaconBlockMerge) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + offset := int(100) + + // Offset (0) 'Block' + dst = ssz.WriteOffset(dst, offset) + if s.Block == nil { + s.Block = new(BeaconBlockMerge) + } + offset += s.Block.SizeSSZ() + + // Field (1) 'Signature' + if len(s.Signature) != 96 { + err = ssz.ErrBytesLength + return + } + dst = append(dst, s.Signature...) 
+ + // Field (0) 'Block' + if dst, err = s.Block.MarshalSSZTo(dst); err != nil { + return + } + + return +} + +// UnmarshalSSZ ssz unmarshals the SignedBeaconBlockMerge object +func (s *SignedBeaconBlockMerge) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size < 100 { + return ssz.ErrSize + } + + tail := buf + var o0 uint64 + + // Offset (0) 'Block' + if o0 = ssz.ReadOffset(buf[0:4]); o0 > size { + return ssz.ErrOffset + } + + if o0 < 100 { + return ssz.ErrInvalidVariableOffset + } + + // Field (1) 'Signature' + if cap(s.Signature) == 0 { + s.Signature = make([]byte, 0, len(buf[4:100])) + } + s.Signature = append(s.Signature, buf[4:100]...) + + // Field (0) 'Block' + { + buf = tail[o0:] + if s.Block == nil { + s.Block = new(BeaconBlockMerge) + } + if err = s.Block.UnmarshalSSZ(buf); err != nil { + return err + } + } + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the SignedBeaconBlockMerge object +func (s *SignedBeaconBlockMerge) SizeSSZ() (size int) { + size = 100 + + // Field (0) 'Block' + if s.Block == nil { + s.Block = new(BeaconBlockMerge) + } + size += s.Block.SizeSSZ() + + return +} + +// HashTreeRoot ssz hashes the SignedBeaconBlockMerge object +func (s *SignedBeaconBlockMerge) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(s) +} + +// HashTreeRootWith ssz hashes the SignedBeaconBlockMerge object with a hasher +func (s *SignedBeaconBlockMerge) HashTreeRootWith(hh *ssz.Hasher) (err error) { + indx := hh.Index() + + // Field (0) 'Block' + if err = s.Block.HashTreeRootWith(hh); err != nil { + return + } + + // Field (1) 'Signature' + if len(s.Signature) != 96 { + err = ssz.ErrBytesLength + return + } + hh.PutBytes(s.Signature) + + hh.Merkleize(indx) + return +} + +// MarshalSSZ ssz marshals the BeaconBlockMerge object +func (b *BeaconBlockMerge) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(b) +} + +// MarshalSSZTo ssz marshals the BeaconBlockMerge object to a target 
array +func (b *BeaconBlockMerge) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + offset := int(84) + + // Field (0) 'Slot' + dst = ssz.MarshalUint64(dst, uint64(b.Slot)) + + // Field (1) 'ProposerIndex' + dst = ssz.MarshalUint64(dst, uint64(b.ProposerIndex)) + + // Field (2) 'ParentRoot' + if len(b.ParentRoot) != 32 { + err = ssz.ErrBytesLength + return + } + dst = append(dst, b.ParentRoot...) + + // Field (3) 'StateRoot' + if len(b.StateRoot) != 32 { + err = ssz.ErrBytesLength + return + } + dst = append(dst, b.StateRoot...) + + // Offset (4) 'Body' + dst = ssz.WriteOffset(dst, offset) + if b.Body == nil { + b.Body = new(BeaconBlockBodyMerge) + } + offset += b.Body.SizeSSZ() + + // Field (4) 'Body' + if dst, err = b.Body.MarshalSSZTo(dst); err != nil { + return + } + + return +} + +// UnmarshalSSZ ssz unmarshals the BeaconBlockMerge object +func (b *BeaconBlockMerge) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size < 84 { + return ssz.ErrSize + } + + tail := buf + var o4 uint64 + + // Field (0) 'Slot' + b.Slot = github_com_prysmaticlabs_eth2_types.Slot(ssz.UnmarshallUint64(buf[0:8])) + + // Field (1) 'ProposerIndex' + b.ProposerIndex = github_com_prysmaticlabs_eth2_types.ValidatorIndex(ssz.UnmarshallUint64(buf[8:16])) + + // Field (2) 'ParentRoot' + if cap(b.ParentRoot) == 0 { + b.ParentRoot = make([]byte, 0, len(buf[16:48])) + } + b.ParentRoot = append(b.ParentRoot, buf[16:48]...) + + // Field (3) 'StateRoot' + if cap(b.StateRoot) == 0 { + b.StateRoot = make([]byte, 0, len(buf[48:80])) + } + b.StateRoot = append(b.StateRoot, buf[48:80]...) 
+ + // Offset (4) 'Body' + if o4 = ssz.ReadOffset(buf[80:84]); o4 > size { + return ssz.ErrOffset + } + + if o4 < 84 { + return ssz.ErrInvalidVariableOffset + } + + // Field (4) 'Body' + { + buf = tail[o4:] + if b.Body == nil { + b.Body = new(BeaconBlockBodyMerge) + } + if err = b.Body.UnmarshalSSZ(buf); err != nil { + return err + } + } + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the BeaconBlockMerge object +func (b *BeaconBlockMerge) SizeSSZ() (size int) { + size = 84 + + // Field (4) 'Body' + if b.Body == nil { + b.Body = new(BeaconBlockBodyMerge) + } + size += b.Body.SizeSSZ() + + return +} + +// HashTreeRoot ssz hashes the BeaconBlockMerge object +func (b *BeaconBlockMerge) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(b) +} + +// HashTreeRootWith ssz hashes the BeaconBlockMerge object with a hasher +func (b *BeaconBlockMerge) HashTreeRootWith(hh *ssz.Hasher) (err error) { + indx := hh.Index() + + // Field (0) 'Slot' + hh.PutUint64(uint64(b.Slot)) + + // Field (1) 'ProposerIndex' + hh.PutUint64(uint64(b.ProposerIndex)) + + // Field (2) 'ParentRoot' + if len(b.ParentRoot) != 32 { + err = ssz.ErrBytesLength + return + } + hh.PutBytes(b.ParentRoot) + + // Field (3) 'StateRoot' + if len(b.StateRoot) != 32 { + err = ssz.ErrBytesLength + return + } + hh.PutBytes(b.StateRoot) + + // Field (4) 'Body' + if err = b.Body.HashTreeRootWith(hh); err != nil { + return + } + + hh.Merkleize(indx) + return +} + +// MarshalSSZ ssz marshals the BeaconBlockBodyMerge object +func (b *BeaconBlockBodyMerge) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(b) +} + +// MarshalSSZTo ssz marshals the BeaconBlockBodyMerge object to a target array +func (b *BeaconBlockBodyMerge) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + offset := int(384) + + // Field (0) 'RandaoReveal' + if len(b.RandaoReveal) != 96 { + err = ssz.ErrBytesLength + return + } + dst = append(dst, b.RandaoReveal...) 
+ + // Field (1) 'Eth1Data' + if b.Eth1Data == nil { + b.Eth1Data = new(Eth1Data) + } + if dst, err = b.Eth1Data.MarshalSSZTo(dst); err != nil { + return + } + + // Field (2) 'Graffiti' + if len(b.Graffiti) != 32 { + err = ssz.ErrBytesLength + return + } + dst = append(dst, b.Graffiti...) + + // Offset (3) 'ProposerSlashings' + dst = ssz.WriteOffset(dst, offset) + offset += len(b.ProposerSlashings) * 416 + + // Offset (4) 'AttesterSlashings' + dst = ssz.WriteOffset(dst, offset) + for ii := 0; ii < len(b.AttesterSlashings); ii++ { + offset += 4 + offset += b.AttesterSlashings[ii].SizeSSZ() + } + + // Offset (5) 'Attestations' + dst = ssz.WriteOffset(dst, offset) + for ii := 0; ii < len(b.Attestations); ii++ { + offset += 4 + offset += b.Attestations[ii].SizeSSZ() + } + + // Offset (6) 'Deposits' + dst = ssz.WriteOffset(dst, offset) + offset += len(b.Deposits) * 1240 + + // Offset (7) 'VoluntaryExits' + dst = ssz.WriteOffset(dst, offset) + offset += len(b.VoluntaryExits) * 112 + + // Field (8) 'SyncAggregate' + if b.SyncAggregate == nil { + b.SyncAggregate = new(SyncAggregate) + } + if dst, err = b.SyncAggregate.MarshalSSZTo(dst); err != nil { + return + } + + // Offset (9) 'ExecutionPayload' + dst = ssz.WriteOffset(dst, offset) + if b.ExecutionPayload == nil { + b.ExecutionPayload = new(ExecutionPayload) + } + offset += b.ExecutionPayload.SizeSSZ() + + // Field (3) 'ProposerSlashings' + if len(b.ProposerSlashings) > 16 { + err = ssz.ErrListTooBig + return + } + for ii := 0; ii < len(b.ProposerSlashings); ii++ { + if dst, err = b.ProposerSlashings[ii].MarshalSSZTo(dst); err != nil { + return + } + } + + // Field (4) 'AttesterSlashings' + if len(b.AttesterSlashings) > 2 { + err = ssz.ErrListTooBig + return + } + { + offset = 4 * len(b.AttesterSlashings) + for ii := 0; ii < len(b.AttesterSlashings); ii++ { + dst = ssz.WriteOffset(dst, offset) + offset += b.AttesterSlashings[ii].SizeSSZ() + } + } + for ii := 0; ii < len(b.AttesterSlashings); ii++ { + if dst, err = 
b.AttesterSlashings[ii].MarshalSSZTo(dst); err != nil { + return + } + } + + // Field (5) 'Attestations' + if len(b.Attestations) > 128 { + err = ssz.ErrListTooBig + return + } + { + offset = 4 * len(b.Attestations) + for ii := 0; ii < len(b.Attestations); ii++ { + dst = ssz.WriteOffset(dst, offset) + offset += b.Attestations[ii].SizeSSZ() + } + } + for ii := 0; ii < len(b.Attestations); ii++ { + if dst, err = b.Attestations[ii].MarshalSSZTo(dst); err != nil { + return + } + } + + // Field (6) 'Deposits' + if len(b.Deposits) > 16 { + err = ssz.ErrListTooBig + return + } + for ii := 0; ii < len(b.Deposits); ii++ { + if dst, err = b.Deposits[ii].MarshalSSZTo(dst); err != nil { + return + } + } + + // Field (7) 'VoluntaryExits' + if len(b.VoluntaryExits) > 16 { + err = ssz.ErrListTooBig + return + } + for ii := 0; ii < len(b.VoluntaryExits); ii++ { + if dst, err = b.VoluntaryExits[ii].MarshalSSZTo(dst); err != nil { + return + } + } + + // Field (9) 'ExecutionPayload' + if dst, err = b.ExecutionPayload.MarshalSSZTo(dst); err != nil { + return + } + + return +} + +// UnmarshalSSZ ssz unmarshals the BeaconBlockBodyMerge object +func (b *BeaconBlockBodyMerge) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size < 384 { + return ssz.ErrSize + } + + tail := buf + var o3, o4, o5, o6, o7, o9 uint64 + + // Field (0) 'RandaoReveal' + if cap(b.RandaoReveal) == 0 { + b.RandaoReveal = make([]byte, 0, len(buf[0:96])) + } + b.RandaoReveal = append(b.RandaoReveal, buf[0:96]...) + + // Field (1) 'Eth1Data' + if b.Eth1Data == nil { + b.Eth1Data = new(Eth1Data) + } + if err = b.Eth1Data.UnmarshalSSZ(buf[96:168]); err != nil { + return err + } + + // Field (2) 'Graffiti' + if cap(b.Graffiti) == 0 { + b.Graffiti = make([]byte, 0, len(buf[168:200])) + } + b.Graffiti = append(b.Graffiti, buf[168:200]...) 
+ + // Offset (3) 'ProposerSlashings' + if o3 = ssz.ReadOffset(buf[200:204]); o3 > size { + return ssz.ErrOffset + } + + if o3 < 384 { + return ssz.ErrInvalidVariableOffset + } + + // Offset (4) 'AttesterSlashings' + if o4 = ssz.ReadOffset(buf[204:208]); o4 > size || o3 > o4 { + return ssz.ErrOffset + } + + // Offset (5) 'Attestations' + if o5 = ssz.ReadOffset(buf[208:212]); o5 > size || o4 > o5 { + return ssz.ErrOffset + } + + // Offset (6) 'Deposits' + if o6 = ssz.ReadOffset(buf[212:216]); o6 > size || o5 > o6 { + return ssz.ErrOffset + } + + // Offset (7) 'VoluntaryExits' + if o7 = ssz.ReadOffset(buf[216:220]); o7 > size || o6 > o7 { + return ssz.ErrOffset + } + + // Field (8) 'SyncAggregate' + if b.SyncAggregate == nil { + b.SyncAggregate = new(SyncAggregate) + } + if err = b.SyncAggregate.UnmarshalSSZ(buf[220:380]); err != nil { + return err + } + + // Offset (9) 'ExecutionPayload' + if o9 = ssz.ReadOffset(buf[380:384]); o9 > size || o7 > o9 { + return ssz.ErrOffset + } + + // Field (3) 'ProposerSlashings' + { + buf = tail[o3:o4] + num, err := ssz.DivideInt2(len(buf), 416, 16) + if err != nil { + return err + } + b.ProposerSlashings = make([]*ProposerSlashing, num) + for ii := 0; ii < num; ii++ { + if b.ProposerSlashings[ii] == nil { + b.ProposerSlashings[ii] = new(ProposerSlashing) + } + if err = b.ProposerSlashings[ii].UnmarshalSSZ(buf[ii*416 : (ii+1)*416]); err != nil { + return err + } + } + } + + // Field (4) 'AttesterSlashings' + { + buf = tail[o4:o5] + num, err := ssz.DecodeDynamicLength(buf, 2) + if err != nil { + return err + } + b.AttesterSlashings = make([]*AttesterSlashing, num) + err = ssz.UnmarshalDynamic(buf, num, func(indx int, buf []byte) (err error) { + if b.AttesterSlashings[indx] == nil { + b.AttesterSlashings[indx] = new(AttesterSlashing) + } + if err = b.AttesterSlashings[indx].UnmarshalSSZ(buf); err != nil { + return err + } + return nil + }) + if err != nil { + return err + } + } + + // Field (5) 'Attestations' + { + buf = tail[o5:o6] + 
num, err := ssz.DecodeDynamicLength(buf, 128) + if err != nil { + return err + } + b.Attestations = make([]*Attestation, num) + err = ssz.UnmarshalDynamic(buf, num, func(indx int, buf []byte) (err error) { + if b.Attestations[indx] == nil { + b.Attestations[indx] = new(Attestation) + } + if err = b.Attestations[indx].UnmarshalSSZ(buf); err != nil { + return err + } + return nil + }) + if err != nil { + return err + } + } + + // Field (6) 'Deposits' + { + buf = tail[o6:o7] + num, err := ssz.DivideInt2(len(buf), 1240, 16) + if err != nil { + return err + } + b.Deposits = make([]*Deposit, num) + for ii := 0; ii < num; ii++ { + if b.Deposits[ii] == nil { + b.Deposits[ii] = new(Deposit) + } + if err = b.Deposits[ii].UnmarshalSSZ(buf[ii*1240 : (ii+1)*1240]); err != nil { + return err + } + } + } + + // Field (7) 'VoluntaryExits' + { + buf = tail[o7:o9] + num, err := ssz.DivideInt2(len(buf), 112, 16) + if err != nil { + return err + } + b.VoluntaryExits = make([]*SignedVoluntaryExit, num) + for ii := 0; ii < num; ii++ { + if b.VoluntaryExits[ii] == nil { + b.VoluntaryExits[ii] = new(SignedVoluntaryExit) + } + if err = b.VoluntaryExits[ii].UnmarshalSSZ(buf[ii*112 : (ii+1)*112]); err != nil { + return err + } + } + } + + // Field (9) 'ExecutionPayload' + { + buf = tail[o9:] + if b.ExecutionPayload == nil { + b.ExecutionPayload = new(ExecutionPayload) + } + if err = b.ExecutionPayload.UnmarshalSSZ(buf); err != nil { + return err + } + } + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the BeaconBlockBodyMerge object +func (b *BeaconBlockBodyMerge) SizeSSZ() (size int) { + size = 384 + + // Field (3) 'ProposerSlashings' + size += len(b.ProposerSlashings) * 416 + + // Field (4) 'AttesterSlashings' + for ii := 0; ii < len(b.AttesterSlashings); ii++ { + size += 4 + size += b.AttesterSlashings[ii].SizeSSZ() + } + + // Field (5) 'Attestations' + for ii := 0; ii < len(b.Attestations); ii++ { + size += 4 + size += b.Attestations[ii].SizeSSZ() + } + + // Field 
(6) 'Deposits' + size += len(b.Deposits) * 1240 + + // Field (7) 'VoluntaryExits' + size += len(b.VoluntaryExits) * 112 + + // Field (9) 'ExecutionPayload' + if b.ExecutionPayload == nil { + b.ExecutionPayload = new(ExecutionPayload) + } + size += b.ExecutionPayload.SizeSSZ() + + return +} + +// HashTreeRoot ssz hashes the BeaconBlockBodyMerge object +func (b *BeaconBlockBodyMerge) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(b) +} + +// HashTreeRootWith ssz hashes the BeaconBlockBodyMerge object with a hasher +func (b *BeaconBlockBodyMerge) HashTreeRootWith(hh *ssz.Hasher) (err error) { + indx := hh.Index() + + // Field (0) 'RandaoReveal' + if len(b.RandaoReveal) != 96 { + err = ssz.ErrBytesLength + return + } + hh.PutBytes(b.RandaoReveal) + + // Field (1) 'Eth1Data' + if err = b.Eth1Data.HashTreeRootWith(hh); err != nil { + return + } + + // Field (2) 'Graffiti' + if len(b.Graffiti) != 32 { + err = ssz.ErrBytesLength + return + } + hh.PutBytes(b.Graffiti) + + // Field (3) 'ProposerSlashings' + { + subIndx := hh.Index() + num := uint64(len(b.ProposerSlashings)) + if num > 16 { + err = ssz.ErrIncorrectListSize + return + } + for _, elem := range b.ProposerSlashings { + if err = elem.HashTreeRootWith(hh); err != nil { + return + } + } + hh.MerkleizeWithMixin(subIndx, num, 16) + } + + // Field (4) 'AttesterSlashings' + { + subIndx := hh.Index() + num := uint64(len(b.AttesterSlashings)) + if num > 2 { + err = ssz.ErrIncorrectListSize + return + } + for _, elem := range b.AttesterSlashings { + if err = elem.HashTreeRootWith(hh); err != nil { + return + } + } + hh.MerkleizeWithMixin(subIndx, num, 2) + } + + // Field (5) 'Attestations' + { + subIndx := hh.Index() + num := uint64(len(b.Attestations)) + if num > 128 { + err = ssz.ErrIncorrectListSize + return + } + for _, elem := range b.Attestations { + if err = elem.HashTreeRootWith(hh); err != nil { + return + } + } + hh.MerkleizeWithMixin(subIndx, num, 128) + } + + // Field (6) 'Deposits' + { + 
subIndx := hh.Index() + num := uint64(len(b.Deposits)) + if num > 16 { + err = ssz.ErrIncorrectListSize + return + } + for _, elem := range b.Deposits { + if err = elem.HashTreeRootWith(hh); err != nil { + return + } + } + hh.MerkleizeWithMixin(subIndx, num, 16) + } + + // Field (7) 'VoluntaryExits' + { + subIndx := hh.Index() + num := uint64(len(b.VoluntaryExits)) + if num > 16 { + err = ssz.ErrIncorrectListSize + return + } + for _, elem := range b.VoluntaryExits { + if err = elem.HashTreeRootWith(hh); err != nil { + return + } + } + hh.MerkleizeWithMixin(subIndx, num, 16) + } + + // Field (8) 'SyncAggregate' + if err = b.SyncAggregate.HashTreeRootWith(hh); err != nil { + return + } + + // Field (9) 'ExecutionPayload' + if err = b.ExecutionPayload.HashTreeRootWith(hh); err != nil { + return + } + + hh.Merkleize(indx) + return +} + +// MarshalSSZ ssz marshals the ExecutionPayload object +func (e *ExecutionPayload) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(e) +} + +// MarshalSSZTo ssz marshals the ExecutionPayload object to a target array +func (e *ExecutionPayload) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + offset := int(508) + + // Field (0) 'ParentHash' + if len(e.ParentHash) != 32 { + err = ssz.ErrBytesLength + return + } + dst = append(dst, e.ParentHash...) + + // Field (1) 'FeeRecipient' + if len(e.FeeRecipient) != 20 { + err = ssz.ErrBytesLength + return + } + dst = append(dst, e.FeeRecipient...) + + // Field (2) 'StateRoot' + if len(e.StateRoot) != 32 { + err = ssz.ErrBytesLength + return + } + dst = append(dst, e.StateRoot...) + + // Field (3) 'ReceiptRoot' + if len(e.ReceiptRoot) != 32 { + err = ssz.ErrBytesLength + return + } + dst = append(dst, e.ReceiptRoot...) + + // Field (4) 'LogsBloom' + if len(e.LogsBloom) != 256 { + err = ssz.ErrBytesLength + return + } + dst = append(dst, e.LogsBloom...) + + // Field (5) 'Random' + if len(e.Random) != 32 { + err = ssz.ErrBytesLength + return + } + dst = append(dst, e.Random...) 
+ + // Field (6) 'BlockNumber' + dst = ssz.MarshalUint64(dst, e.BlockNumber) + + // Field (7) 'GasLimit' + dst = ssz.MarshalUint64(dst, e.GasLimit) + + // Field (8) 'GasUsed' + dst = ssz.MarshalUint64(dst, e.GasUsed) + + // Field (9) 'Timestamp' + dst = ssz.MarshalUint64(dst, e.Timestamp) + + // Offset (10) 'ExtraData' + dst = ssz.WriteOffset(dst, offset) + offset += len(e.ExtraData) + + // Field (11) 'BaseFeePerGas' + if len(e.BaseFeePerGas) != 32 { + err = ssz.ErrBytesLength + return + } + dst = append(dst, e.BaseFeePerGas...) + + // Field (12) 'BlockHash' + if len(e.BlockHash) != 32 { + err = ssz.ErrBytesLength + return + } + dst = append(dst, e.BlockHash...) + + // Offset (13) 'Transactions' + dst = ssz.WriteOffset(dst, offset) + for ii := 0; ii < len(e.Transactions); ii++ { + offset += 4 + offset += len(e.Transactions[ii]) + } + + // Field (10) 'ExtraData' + if len(e.ExtraData) > 32 { + err = ssz.ErrBytesLength + return + } + dst = append(dst, e.ExtraData...) + + // Field (13) 'Transactions' + if len(e.Transactions) > 1048576 { + err = ssz.ErrListTooBig + return + } + { + offset = 4 * len(e.Transactions) + for ii := 0; ii < len(e.Transactions); ii++ { + dst = ssz.WriteOffset(dst, offset) + offset += len(e.Transactions[ii]) + } + } + for ii := 0; ii < len(e.Transactions); ii++ { + if len(e.Transactions[ii]) > 1073741824 { + err = ssz.ErrBytesLength + return + } + dst = append(dst, e.Transactions[ii]...) + } + + return +} + +// UnmarshalSSZ ssz unmarshals the ExecutionPayload object +func (e *ExecutionPayload) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size < 508 { + return ssz.ErrSize + } + + tail := buf + var o10, o13 uint64 + + // Field (0) 'ParentHash' + if cap(e.ParentHash) == 0 { + e.ParentHash = make([]byte, 0, len(buf[0:32])) + } + e.ParentHash = append(e.ParentHash, buf[0:32]...) 
+ + // Field (1) 'FeeRecipient' + if cap(e.FeeRecipient) == 0 { + e.FeeRecipient = make([]byte, 0, len(buf[32:52])) + } + e.FeeRecipient = append(e.FeeRecipient, buf[32:52]...) + + // Field (2) 'StateRoot' + if cap(e.StateRoot) == 0 { + e.StateRoot = make([]byte, 0, len(buf[52:84])) + } + e.StateRoot = append(e.StateRoot, buf[52:84]...) + + // Field (3) 'ReceiptRoot' + if cap(e.ReceiptRoot) == 0 { + e.ReceiptRoot = make([]byte, 0, len(buf[84:116])) + } + e.ReceiptRoot = append(e.ReceiptRoot, buf[84:116]...) + + // Field (4) 'LogsBloom' + if cap(e.LogsBloom) == 0 { + e.LogsBloom = make([]byte, 0, len(buf[116:372])) + } + e.LogsBloom = append(e.LogsBloom, buf[116:372]...) + + // Field (5) 'Random' + if cap(e.Random) == 0 { + e.Random = make([]byte, 0, len(buf[372:404])) + } + e.Random = append(e.Random, buf[372:404]...) + + // Field (6) 'BlockNumber' + e.BlockNumber = ssz.UnmarshallUint64(buf[404:412]) + + // Field (7) 'GasLimit' + e.GasLimit = ssz.UnmarshallUint64(buf[412:420]) + + // Field (8) 'GasUsed' + e.GasUsed = ssz.UnmarshallUint64(buf[420:428]) + + // Field (9) 'Timestamp' + e.Timestamp = ssz.UnmarshallUint64(buf[428:436]) + + // Offset (10) 'ExtraData' + if o10 = ssz.ReadOffset(buf[436:440]); o10 > size { + return ssz.ErrOffset + } + + if o10 < 508 { + return ssz.ErrInvalidVariableOffset + } + + // Field (11) 'BaseFeePerGas' + if cap(e.BaseFeePerGas) == 0 { + e.BaseFeePerGas = make([]byte, 0, len(buf[440:472])) + } + e.BaseFeePerGas = append(e.BaseFeePerGas, buf[440:472]...) + + // Field (12) 'BlockHash' + if cap(e.BlockHash) == 0 { + e.BlockHash = make([]byte, 0, len(buf[472:504])) + } + e.BlockHash = append(e.BlockHash, buf[472:504]...) 
+ + // Offset (13) 'Transactions' + if o13 = ssz.ReadOffset(buf[504:508]); o13 > size || o10 > o13 { + return ssz.ErrOffset + } + + // Field (10) 'ExtraData' + { + buf = tail[o10:o13] + if len(buf) > 32 { + return ssz.ErrBytesLength + } + if cap(e.ExtraData) == 0 { + e.ExtraData = make([]byte, 0, len(buf)) + } + e.ExtraData = append(e.ExtraData, buf...) + } + + // Field (13) 'Transactions' + { + buf = tail[o13:] + num, err := ssz.DecodeDynamicLength(buf, 1048576) + if err != nil { + return err + } + e.Transactions = make([][]byte, num) + err = ssz.UnmarshalDynamic(buf, num, func(indx int, buf []byte) (err error) { + if len(buf) > 1073741824 { + return ssz.ErrBytesLength + } + if cap(e.Transactions[indx]) == 0 { + e.Transactions[indx] = make([]byte, 0, len(buf)) + } + e.Transactions[indx] = append(e.Transactions[indx], buf...) + return nil + }) + if err != nil { + return err + } + } + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the ExecutionPayload object +func (e *ExecutionPayload) SizeSSZ() (size int) { + size = 508 + + // Field (10) 'ExtraData' + size += len(e.ExtraData) + + // Field (13) 'Transactions' + for ii := 0; ii < len(e.Transactions); ii++ { + size += 4 + size += len(e.Transactions[ii]) + } + + return +} + +// HashTreeRoot ssz hashes the ExecutionPayload object +func (e *ExecutionPayload) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(e) +} + +// HashTreeRootWith ssz hashes the ExecutionPayload object with a hasher +func (e *ExecutionPayload) HashTreeRootWith(hh *ssz.Hasher) (err error) { + indx := hh.Index() + + // Field (0) 'ParentHash' + if len(e.ParentHash) != 32 { + err = ssz.ErrBytesLength + return + } + hh.PutBytes(e.ParentHash) + + // Field (1) 'FeeRecipient' + if len(e.FeeRecipient) != 20 { + err = ssz.ErrBytesLength + return + } + hh.PutBytes(e.FeeRecipient) + + // Field (2) 'StateRoot' + if len(e.StateRoot) != 32 { + err = ssz.ErrBytesLength + return + } + hh.PutBytes(e.StateRoot) + + // Field 
(3) 'ReceiptRoot' + if len(e.ReceiptRoot) != 32 { + err = ssz.ErrBytesLength + return + } + hh.PutBytes(e.ReceiptRoot) + + // Field (4) 'LogsBloom' + if len(e.LogsBloom) != 256 { + err = ssz.ErrBytesLength + return + } + hh.PutBytes(e.LogsBloom) + + // Field (5) 'Random' + if len(e.Random) != 32 { + err = ssz.ErrBytesLength + return + } + hh.PutBytes(e.Random) + + // Field (6) 'BlockNumber' + hh.PutUint64(e.BlockNumber) + + // Field (7) 'GasLimit' + hh.PutUint64(e.GasLimit) + + // Field (8) 'GasUsed' + hh.PutUint64(e.GasUsed) + + // Field (9) 'Timestamp' + hh.PutUint64(e.Timestamp) + + // Field (10) 'ExtraData' + { + elemIndx := hh.Index() + byteLen := uint64(len(e.ExtraData)) + if byteLen > 32 { + err = ssz.ErrIncorrectListSize + return + } + hh.PutBytes(e.ExtraData) + hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32) + } + + // Field (11) 'BaseFeePerGas' + if len(e.BaseFeePerGas) != 32 { + err = ssz.ErrBytesLength + return + } + hh.PutBytes(e.BaseFeePerGas) + + // Field (12) 'BlockHash' + if len(e.BlockHash) != 32 { + err = ssz.ErrBytesLength + return + } + hh.PutBytes(e.BlockHash) + + // Field (13) 'Transactions' + { + subIndx := hh.Index() + num := uint64(len(e.Transactions)) + if num > 1048576 { + err = ssz.ErrIncorrectListSize + return + } + for _, elem := range e.Transactions { + { + elemIndx := hh.Index() + byteLen := uint64(len(elem)) + if byteLen > 1073741824 { + err = ssz.ErrIncorrectListSize + return + } + hh.AppendBytes32(elem) + hh.MerkleizeWithMixin(elemIndx, byteLen, (1073741824+31)/32) + } + } + hh.MerkleizeWithMixin(subIndx, num, 1048576) + } + + hh.Merkleize(indx) + return +} + // MarshalSSZ ssz marshals the Deposit_Data object func (d *Deposit_Data) MarshalSSZ() ([]byte, error) { return ssz.MarshalSSZ(d) @@ -5332,6 +6451,1141 @@ func (s *SyncAggregatorSelectionData) HashTreeRootWith(hh *ssz.Hasher) (err erro return } +// MarshalSSZ ssz marshals the BeaconStateMerge object +func (b *BeaconStateMerge) MarshalSSZ() ([]byte, error) { + return 
ssz.MarshalSSZ(b) +} + +// MarshalSSZTo ssz marshals the BeaconStateMerge object to a target array +func (b *BeaconStateMerge) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + offset := int(2736633) + + // Field (0) 'GenesisTime' + dst = ssz.MarshalUint64(dst, b.GenesisTime) + + // Field (1) 'GenesisValidatorsRoot' + if len(b.GenesisValidatorsRoot) != 32 { + err = ssz.ErrBytesLength + return + } + dst = append(dst, b.GenesisValidatorsRoot...) + + // Field (2) 'Slot' + dst = ssz.MarshalUint64(dst, uint64(b.Slot)) + + // Field (3) 'Fork' + if b.Fork == nil { + b.Fork = new(Fork) + } + if dst, err = b.Fork.MarshalSSZTo(dst); err != nil { + return + } + + // Field (4) 'LatestBlockHeader' + if b.LatestBlockHeader == nil { + b.LatestBlockHeader = new(BeaconBlockHeader) + } + if dst, err = b.LatestBlockHeader.MarshalSSZTo(dst); err != nil { + return + } + + // Field (5) 'BlockRoots' + if len(b.BlockRoots) != 8192 { + err = ssz.ErrVectorLength + return + } + for ii := 0; ii < 8192; ii++ { + if len(b.BlockRoots[ii]) != 32 { + err = ssz.ErrBytesLength + return + } + dst = append(dst, b.BlockRoots[ii]...) + } + + // Field (6) 'StateRoots' + if len(b.StateRoots) != 8192 { + err = ssz.ErrVectorLength + return + } + for ii := 0; ii < 8192; ii++ { + if len(b.StateRoots[ii]) != 32 { + err = ssz.ErrBytesLength + return + } + dst = append(dst, b.StateRoots[ii]...) 
+ } + + // Offset (7) 'HistoricalRoots' + dst = ssz.WriteOffset(dst, offset) + offset += len(b.HistoricalRoots) * 32 + + // Field (8) 'Eth1Data' + if b.Eth1Data == nil { + b.Eth1Data = new(Eth1Data) + } + if dst, err = b.Eth1Data.MarshalSSZTo(dst); err != nil { + return + } + + // Offset (9) 'Eth1DataVotes' + dst = ssz.WriteOffset(dst, offset) + offset += len(b.Eth1DataVotes) * 72 + + // Field (10) 'Eth1DepositIndex' + dst = ssz.MarshalUint64(dst, b.Eth1DepositIndex) + + // Offset (11) 'Validators' + dst = ssz.WriteOffset(dst, offset) + offset += len(b.Validators) * 121 + + // Offset (12) 'Balances' + dst = ssz.WriteOffset(dst, offset) + offset += len(b.Balances) * 8 + + // Field (13) 'RandaoMixes' + if len(b.RandaoMixes) != 65536 { + err = ssz.ErrVectorLength + return + } + for ii := 0; ii < 65536; ii++ { + if len(b.RandaoMixes[ii]) != 32 { + err = ssz.ErrBytesLength + return + } + dst = append(dst, b.RandaoMixes[ii]...) + } + + // Field (14) 'Slashings' + if len(b.Slashings) != 8192 { + err = ssz.ErrVectorLength + return + } + for ii := 0; ii < 8192; ii++ { + dst = ssz.MarshalUint64(dst, b.Slashings[ii]) + } + + // Offset (15) 'PreviousEpochParticipation' + dst = ssz.WriteOffset(dst, offset) + offset += len(b.PreviousEpochParticipation) + + // Offset (16) 'CurrentEpochParticipation' + dst = ssz.WriteOffset(dst, offset) + offset += len(b.CurrentEpochParticipation) + + // Field (17) 'JustificationBits' + if len(b.JustificationBits) != 1 { + err = ssz.ErrBytesLength + return + } + dst = append(dst, b.JustificationBits...) 
+ + // Field (18) 'PreviousJustifiedCheckpoint' + if b.PreviousJustifiedCheckpoint == nil { + b.PreviousJustifiedCheckpoint = new(Checkpoint) + } + if dst, err = b.PreviousJustifiedCheckpoint.MarshalSSZTo(dst); err != nil { + return + } + + // Field (19) 'CurrentJustifiedCheckpoint' + if b.CurrentJustifiedCheckpoint == nil { + b.CurrentJustifiedCheckpoint = new(Checkpoint) + } + if dst, err = b.CurrentJustifiedCheckpoint.MarshalSSZTo(dst); err != nil { + return + } + + // Field (20) 'FinalizedCheckpoint' + if b.FinalizedCheckpoint == nil { + b.FinalizedCheckpoint = new(Checkpoint) + } + if dst, err = b.FinalizedCheckpoint.MarshalSSZTo(dst); err != nil { + return + } + + // Offset (21) 'InactivityScores' + dst = ssz.WriteOffset(dst, offset) + offset += len(b.InactivityScores) * 8 + + // Field (22) 'CurrentSyncCommittee' + if b.CurrentSyncCommittee == nil { + b.CurrentSyncCommittee = new(SyncCommittee) + } + if dst, err = b.CurrentSyncCommittee.MarshalSSZTo(dst); err != nil { + return + } + + // Field (23) 'NextSyncCommittee' + if b.NextSyncCommittee == nil { + b.NextSyncCommittee = new(SyncCommittee) + } + if dst, err = b.NextSyncCommittee.MarshalSSZTo(dst); err != nil { + return + } + + // Offset (24) 'LatestExecutionPayloadHeader' + dst = ssz.WriteOffset(dst, offset) + if b.LatestExecutionPayloadHeader == nil { + b.LatestExecutionPayloadHeader = new(ExecutionPayloadHeader) + } + offset += b.LatestExecutionPayloadHeader.SizeSSZ() + + // Field (7) 'HistoricalRoots' + if len(b.HistoricalRoots) > 16777216 { + err = ssz.ErrListTooBig + return + } + for ii := 0; ii < len(b.HistoricalRoots); ii++ { + if len(b.HistoricalRoots[ii]) != 32 { + err = ssz.ErrBytesLength + return + } + dst = append(dst, b.HistoricalRoots[ii]...) 
+ } + + // Field (9) 'Eth1DataVotes' + if len(b.Eth1DataVotes) > 2048 { + err = ssz.ErrListTooBig + return + } + for ii := 0; ii < len(b.Eth1DataVotes); ii++ { + if dst, err = b.Eth1DataVotes[ii].MarshalSSZTo(dst); err != nil { + return + } + } + + // Field (11) 'Validators' + if len(b.Validators) > 1099511627776 { + err = ssz.ErrListTooBig + return + } + for ii := 0; ii < len(b.Validators); ii++ { + if dst, err = b.Validators[ii].MarshalSSZTo(dst); err != nil { + return + } + } + + // Field (12) 'Balances' + if len(b.Balances) > 1099511627776 { + err = ssz.ErrListTooBig + return + } + for ii := 0; ii < len(b.Balances); ii++ { + dst = ssz.MarshalUint64(dst, b.Balances[ii]) + } + + // Field (15) 'PreviousEpochParticipation' + if len(b.PreviousEpochParticipation) > 1099511627776 { + err = ssz.ErrBytesLength + return + } + dst = append(dst, b.PreviousEpochParticipation...) + + // Field (16) 'CurrentEpochParticipation' + if len(b.CurrentEpochParticipation) > 1099511627776 { + err = ssz.ErrBytesLength + return + } + dst = append(dst, b.CurrentEpochParticipation...) 
+ + // Field (21) 'InactivityScores' + if len(b.InactivityScores) > 1099511627776 { + err = ssz.ErrListTooBig + return + } + for ii := 0; ii < len(b.InactivityScores); ii++ { + dst = ssz.MarshalUint64(dst, b.InactivityScores[ii]) + } + + // Field (24) 'LatestExecutionPayloadHeader' + if dst, err = b.LatestExecutionPayloadHeader.MarshalSSZTo(dst); err != nil { + return + } + + return +} + +// UnmarshalSSZ ssz unmarshals the BeaconStateMerge object +func (b *BeaconStateMerge) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size < 2736633 { + return ssz.ErrSize + } + + tail := buf + var o7, o9, o11, o12, o15, o16, o21, o24 uint64 + + // Field (0) 'GenesisTime' + b.GenesisTime = ssz.UnmarshallUint64(buf[0:8]) + + // Field (1) 'GenesisValidatorsRoot' + if cap(b.GenesisValidatorsRoot) == 0 { + b.GenesisValidatorsRoot = make([]byte, 0, len(buf[8:40])) + } + b.GenesisValidatorsRoot = append(b.GenesisValidatorsRoot, buf[8:40]...) + + // Field (2) 'Slot' + b.Slot = github_com_prysmaticlabs_eth2_types.Slot(ssz.UnmarshallUint64(buf[40:48])) + + // Field (3) 'Fork' + if b.Fork == nil { + b.Fork = new(Fork) + } + if err = b.Fork.UnmarshalSSZ(buf[48:64]); err != nil { + return err + } + + // Field (4) 'LatestBlockHeader' + if b.LatestBlockHeader == nil { + b.LatestBlockHeader = new(BeaconBlockHeader) + } + if err = b.LatestBlockHeader.UnmarshalSSZ(buf[64:176]); err != nil { + return err + } + + // Field (5) 'BlockRoots' + b.BlockRoots = make([][]byte, 8192) + for ii := 0; ii < 8192; ii++ { + if cap(b.BlockRoots[ii]) == 0 { + b.BlockRoots[ii] = make([]byte, 0, len(buf[176:262320][ii*32:(ii+1)*32])) + } + b.BlockRoots[ii] = append(b.BlockRoots[ii], buf[176:262320][ii*32:(ii+1)*32]...) 
+ } + + // Field (6) 'StateRoots' + b.StateRoots = make([][]byte, 8192) + for ii := 0; ii < 8192; ii++ { + if cap(b.StateRoots[ii]) == 0 { + b.StateRoots[ii] = make([]byte, 0, len(buf[262320:524464][ii*32:(ii+1)*32])) + } + b.StateRoots[ii] = append(b.StateRoots[ii], buf[262320:524464][ii*32:(ii+1)*32]...) + } + + // Offset (7) 'HistoricalRoots' + if o7 = ssz.ReadOffset(buf[524464:524468]); o7 > size { + return ssz.ErrOffset + } + + if o7 < 2736633 { + return ssz.ErrInvalidVariableOffset + } + + // Field (8) 'Eth1Data' + if b.Eth1Data == nil { + b.Eth1Data = new(Eth1Data) + } + if err = b.Eth1Data.UnmarshalSSZ(buf[524468:524540]); err != nil { + return err + } + + // Offset (9) 'Eth1DataVotes' + if o9 = ssz.ReadOffset(buf[524540:524544]); o9 > size || o7 > o9 { + return ssz.ErrOffset + } + + // Field (10) 'Eth1DepositIndex' + b.Eth1DepositIndex = ssz.UnmarshallUint64(buf[524544:524552]) + + // Offset (11) 'Validators' + if o11 = ssz.ReadOffset(buf[524552:524556]); o11 > size || o9 > o11 { + return ssz.ErrOffset + } + + // Offset (12) 'Balances' + if o12 = ssz.ReadOffset(buf[524556:524560]); o12 > size || o11 > o12 { + return ssz.ErrOffset + } + + // Field (13) 'RandaoMixes' + b.RandaoMixes = make([][]byte, 65536) + for ii := 0; ii < 65536; ii++ { + if cap(b.RandaoMixes[ii]) == 0 { + b.RandaoMixes[ii] = make([]byte, 0, len(buf[524560:2621712][ii*32:(ii+1)*32])) + } + b.RandaoMixes[ii] = append(b.RandaoMixes[ii], buf[524560:2621712][ii*32:(ii+1)*32]...) 
+ } + + // Field (14) 'Slashings' + b.Slashings = ssz.ExtendUint64(b.Slashings, 8192) + for ii := 0; ii < 8192; ii++ { + b.Slashings[ii] = ssz.UnmarshallUint64(buf[2621712:2687248][ii*8 : (ii+1)*8]) + } + + // Offset (15) 'PreviousEpochParticipation' + if o15 = ssz.ReadOffset(buf[2687248:2687252]); o15 > size || o12 > o15 { + return ssz.ErrOffset + } + + // Offset (16) 'CurrentEpochParticipation' + if o16 = ssz.ReadOffset(buf[2687252:2687256]); o16 > size || o15 > o16 { + return ssz.ErrOffset + } + + // Field (17) 'JustificationBits' + if cap(b.JustificationBits) == 0 { + b.JustificationBits = make([]byte, 0, len(buf[2687256:2687257])) + } + b.JustificationBits = append(b.JustificationBits, buf[2687256:2687257]...) + + // Field (18) 'PreviousJustifiedCheckpoint' + if b.PreviousJustifiedCheckpoint == nil { + b.PreviousJustifiedCheckpoint = new(Checkpoint) + } + if err = b.PreviousJustifiedCheckpoint.UnmarshalSSZ(buf[2687257:2687297]); err != nil { + return err + } + + // Field (19) 'CurrentJustifiedCheckpoint' + if b.CurrentJustifiedCheckpoint == nil { + b.CurrentJustifiedCheckpoint = new(Checkpoint) + } + if err = b.CurrentJustifiedCheckpoint.UnmarshalSSZ(buf[2687297:2687337]); err != nil { + return err + } + + // Field (20) 'FinalizedCheckpoint' + if b.FinalizedCheckpoint == nil { + b.FinalizedCheckpoint = new(Checkpoint) + } + if err = b.FinalizedCheckpoint.UnmarshalSSZ(buf[2687337:2687377]); err != nil { + return err + } + + // Offset (21) 'InactivityScores' + if o21 = ssz.ReadOffset(buf[2687377:2687381]); o21 > size || o16 > o21 { + return ssz.ErrOffset + } + + // Field (22) 'CurrentSyncCommittee' + if b.CurrentSyncCommittee == nil { + b.CurrentSyncCommittee = new(SyncCommittee) + } + if err = b.CurrentSyncCommittee.UnmarshalSSZ(buf[2687381:2712005]); err != nil { + return err + } + + // Field (23) 'NextSyncCommittee' + if b.NextSyncCommittee == nil { + b.NextSyncCommittee = new(SyncCommittee) + } + if err = 
b.NextSyncCommittee.UnmarshalSSZ(buf[2712005:2736629]); err != nil { + return err + } + + // Offset (24) 'LatestExecutionPayloadHeader' + if o24 = ssz.ReadOffset(buf[2736629:2736633]); o24 > size || o21 > o24 { + return ssz.ErrOffset + } + + // Field (7) 'HistoricalRoots' + { + buf = tail[o7:o9] + num, err := ssz.DivideInt2(len(buf), 32, 16777216) + if err != nil { + return err + } + b.HistoricalRoots = make([][]byte, num) + for ii := 0; ii < num; ii++ { + if cap(b.HistoricalRoots[ii]) == 0 { + b.HistoricalRoots[ii] = make([]byte, 0, len(buf[ii*32:(ii+1)*32])) + } + b.HistoricalRoots[ii] = append(b.HistoricalRoots[ii], buf[ii*32:(ii+1)*32]...) + } + } + + // Field (9) 'Eth1DataVotes' + { + buf = tail[o9:o11] + num, err := ssz.DivideInt2(len(buf), 72, 2048) + if err != nil { + return err + } + b.Eth1DataVotes = make([]*Eth1Data, num) + for ii := 0; ii < num; ii++ { + if b.Eth1DataVotes[ii] == nil { + b.Eth1DataVotes[ii] = new(Eth1Data) + } + if err = b.Eth1DataVotes[ii].UnmarshalSSZ(buf[ii*72 : (ii+1)*72]); err != nil { + return err + } + } + } + + // Field (11) 'Validators' + { + buf = tail[o11:o12] + num, err := ssz.DivideInt2(len(buf), 121, 1099511627776) + if err != nil { + return err + } + b.Validators = make([]*Validator, num) + for ii := 0; ii < num; ii++ { + if b.Validators[ii] == nil { + b.Validators[ii] = new(Validator) + } + if err = b.Validators[ii].UnmarshalSSZ(buf[ii*121 : (ii+1)*121]); err != nil { + return err + } + } + } + + // Field (12) 'Balances' + { + buf = tail[o12:o15] + num, err := ssz.DivideInt2(len(buf), 8, 1099511627776) + if err != nil { + return err + } + b.Balances = ssz.ExtendUint64(b.Balances, num) + for ii := 0; ii < num; ii++ { + b.Balances[ii] = ssz.UnmarshallUint64(buf[ii*8 : (ii+1)*8]) + } + } + + // Field (15) 'PreviousEpochParticipation' + { + buf = tail[o15:o16] + if len(buf) > 1099511627776 { + return ssz.ErrBytesLength + } + if cap(b.PreviousEpochParticipation) == 0 { + b.PreviousEpochParticipation = make([]byte, 0, 
len(buf)) + } + b.PreviousEpochParticipation = append(b.PreviousEpochParticipation, buf...) + } + + // Field (16) 'CurrentEpochParticipation' + { + buf = tail[o16:o21] + if len(buf) > 1099511627776 { + return ssz.ErrBytesLength + } + if cap(b.CurrentEpochParticipation) == 0 { + b.CurrentEpochParticipation = make([]byte, 0, len(buf)) + } + b.CurrentEpochParticipation = append(b.CurrentEpochParticipation, buf...) + } + + // Field (21) 'InactivityScores' + { + buf = tail[o21:o24] + num, err := ssz.DivideInt2(len(buf), 8, 1099511627776) + if err != nil { + return err + } + b.InactivityScores = ssz.ExtendUint64(b.InactivityScores, num) + for ii := 0; ii < num; ii++ { + b.InactivityScores[ii] = ssz.UnmarshallUint64(buf[ii*8 : (ii+1)*8]) + } + } + + // Field (24) 'LatestExecutionPayloadHeader' + { + buf = tail[o24:] + if b.LatestExecutionPayloadHeader == nil { + b.LatestExecutionPayloadHeader = new(ExecutionPayloadHeader) + } + if err = b.LatestExecutionPayloadHeader.UnmarshalSSZ(buf); err != nil { + return err + } + } + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the BeaconStateMerge object +func (b *BeaconStateMerge) SizeSSZ() (size int) { + size = 2736633 + + // Field (7) 'HistoricalRoots' + size += len(b.HistoricalRoots) * 32 + + // Field (9) 'Eth1DataVotes' + size += len(b.Eth1DataVotes) * 72 + + // Field (11) 'Validators' + size += len(b.Validators) * 121 + + // Field (12) 'Balances' + size += len(b.Balances) * 8 + + // Field (15) 'PreviousEpochParticipation' + size += len(b.PreviousEpochParticipation) + + // Field (16) 'CurrentEpochParticipation' + size += len(b.CurrentEpochParticipation) + + // Field (21) 'InactivityScores' + size += len(b.InactivityScores) * 8 + + // Field (24) 'LatestExecutionPayloadHeader' + if b.LatestExecutionPayloadHeader == nil { + b.LatestExecutionPayloadHeader = new(ExecutionPayloadHeader) + } + size += b.LatestExecutionPayloadHeader.SizeSSZ() + + return +} + +// HashTreeRoot ssz hashes the BeaconStateMerge 
object +func (b *BeaconStateMerge) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(b) +} + +// HashTreeRootWith ssz hashes the BeaconStateMerge object with a hasher +func (b *BeaconStateMerge) HashTreeRootWith(hh *ssz.Hasher) (err error) { + indx := hh.Index() + + // Field (0) 'GenesisTime' + hh.PutUint64(b.GenesisTime) + + // Field (1) 'GenesisValidatorsRoot' + if len(b.GenesisValidatorsRoot) != 32 { + err = ssz.ErrBytesLength + return + } + hh.PutBytes(b.GenesisValidatorsRoot) + + // Field (2) 'Slot' + hh.PutUint64(uint64(b.Slot)) + + // Field (3) 'Fork' + if err = b.Fork.HashTreeRootWith(hh); err != nil { + return + } + + // Field (4) 'LatestBlockHeader' + if err = b.LatestBlockHeader.HashTreeRootWith(hh); err != nil { + return + } + + // Field (5) 'BlockRoots' + { + if len(b.BlockRoots) != 8192 { + err = ssz.ErrVectorLength + return + } + subIndx := hh.Index() + for _, i := range b.BlockRoots { + if len(i) != 32 { + err = ssz.ErrBytesLength + return + } + hh.Append(i) + } + hh.Merkleize(subIndx) + } + + // Field (6) 'StateRoots' + { + if len(b.StateRoots) != 8192 { + err = ssz.ErrVectorLength + return + } + subIndx := hh.Index() + for _, i := range b.StateRoots { + if len(i) != 32 { + err = ssz.ErrBytesLength + return + } + hh.Append(i) + } + hh.Merkleize(subIndx) + } + + // Field (7) 'HistoricalRoots' + { + if len(b.HistoricalRoots) > 16777216 { + err = ssz.ErrListTooBig + return + } + subIndx := hh.Index() + for _, i := range b.HistoricalRoots { + if len(i) != 32 { + err = ssz.ErrBytesLength + return + } + hh.Append(i) + } + numItems := uint64(len(b.HistoricalRoots)) + hh.MerkleizeWithMixin(subIndx, numItems, ssz.CalculateLimit(16777216, numItems, 32)) + } + + // Field (8) 'Eth1Data' + if err = b.Eth1Data.HashTreeRootWith(hh); err != nil { + return + } + + // Field (9) 'Eth1DataVotes' + { + subIndx := hh.Index() + num := uint64(len(b.Eth1DataVotes)) + if num > 2048 { + err = ssz.ErrIncorrectListSize + return + } + for _, elem := range 
b.Eth1DataVotes { + if err = elem.HashTreeRootWith(hh); err != nil { + return + } + } + hh.MerkleizeWithMixin(subIndx, num, 2048) + } + + // Field (10) 'Eth1DepositIndex' + hh.PutUint64(b.Eth1DepositIndex) + + // Field (11) 'Validators' + { + subIndx := hh.Index() + num := uint64(len(b.Validators)) + if num > 1099511627776 { + err = ssz.ErrIncorrectListSize + return + } + for _, elem := range b.Validators { + if err = elem.HashTreeRootWith(hh); err != nil { + return + } + } + hh.MerkleizeWithMixin(subIndx, num, 1099511627776) + } + + // Field (12) 'Balances' + { + if len(b.Balances) > 1099511627776 { + err = ssz.ErrListTooBig + return + } + subIndx := hh.Index() + for _, i := range b.Balances { + hh.AppendUint64(i) + } + hh.FillUpTo32() + numItems := uint64(len(b.Balances)) + hh.MerkleizeWithMixin(subIndx, numItems, ssz.CalculateLimit(1099511627776, numItems, 8)) + } + + // Field (13) 'RandaoMixes' + { + if len(b.RandaoMixes) != 65536 { + err = ssz.ErrVectorLength + return + } + subIndx := hh.Index() + for _, i := range b.RandaoMixes { + if len(i) != 32 { + err = ssz.ErrBytesLength + return + } + hh.Append(i) + } + hh.Merkleize(subIndx) + } + + // Field (14) 'Slashings' + { + if len(b.Slashings) != 8192 { + err = ssz.ErrVectorLength + return + } + subIndx := hh.Index() + for _, i := range b.Slashings { + hh.AppendUint64(i) + } + hh.Merkleize(subIndx) + } + + // Field (15) 'PreviousEpochParticipation' + { + elemIndx := hh.Index() + byteLen := uint64(len(b.PreviousEpochParticipation)) + if byteLen > 1099511627776 { + err = ssz.ErrIncorrectListSize + return + } + hh.PutBytes(b.PreviousEpochParticipation) + hh.MerkleizeWithMixin(elemIndx, byteLen, (1099511627776+31)/32) + } + + // Field (16) 'CurrentEpochParticipation' + { + elemIndx := hh.Index() + byteLen := uint64(len(b.CurrentEpochParticipation)) + if byteLen > 1099511627776 { + err = ssz.ErrIncorrectListSize + return + } + hh.PutBytes(b.CurrentEpochParticipation) + hh.MerkleizeWithMixin(elemIndx, byteLen, 
(1099511627776+31)/32) + } + + // Field (17) 'JustificationBits' + if len(b.JustificationBits) != 1 { + err = ssz.ErrBytesLength + return + } + hh.PutBytes(b.JustificationBits) + + // Field (18) 'PreviousJustifiedCheckpoint' + if err = b.PreviousJustifiedCheckpoint.HashTreeRootWith(hh); err != nil { + return + } + + // Field (19) 'CurrentJustifiedCheckpoint' + if err = b.CurrentJustifiedCheckpoint.HashTreeRootWith(hh); err != nil { + return + } + + // Field (20) 'FinalizedCheckpoint' + if err = b.FinalizedCheckpoint.HashTreeRootWith(hh); err != nil { + return + } + + // Field (21) 'InactivityScores' + { + if len(b.InactivityScores) > 1099511627776 { + err = ssz.ErrListTooBig + return + } + subIndx := hh.Index() + for _, i := range b.InactivityScores { + hh.AppendUint64(i) + } + hh.FillUpTo32() + numItems := uint64(len(b.InactivityScores)) + hh.MerkleizeWithMixin(subIndx, numItems, ssz.CalculateLimit(1099511627776, numItems, 8)) + } + + // Field (22) 'CurrentSyncCommittee' + if err = b.CurrentSyncCommittee.HashTreeRootWith(hh); err != nil { + return + } + + // Field (23) 'NextSyncCommittee' + if err = b.NextSyncCommittee.HashTreeRootWith(hh); err != nil { + return + } + + // Field (24) 'LatestExecutionPayloadHeader' + if err = b.LatestExecutionPayloadHeader.HashTreeRootWith(hh); err != nil { + return + } + + hh.Merkleize(indx) + return +} + +// MarshalSSZ ssz marshals the ExecutionPayloadHeader object +func (e *ExecutionPayloadHeader) MarshalSSZ() ([]byte, error) { + return ssz.MarshalSSZ(e) +} + +// MarshalSSZTo ssz marshals the ExecutionPayloadHeader object to a target array +func (e *ExecutionPayloadHeader) MarshalSSZTo(buf []byte) (dst []byte, err error) { + dst = buf + offset := int(536) + + // Field (0) 'ParentHash' + if len(e.ParentHash) != 32 { + err = ssz.ErrBytesLength + return + } + dst = append(dst, e.ParentHash...) 
+ + // Field (1) 'FeeRecipient' + if len(e.FeeRecipient) != 20 { + err = ssz.ErrBytesLength + return + } + dst = append(dst, e.FeeRecipient...) + + // Field (2) 'StateRoot' + if len(e.StateRoot) != 32 { + err = ssz.ErrBytesLength + return + } + dst = append(dst, e.StateRoot...) + + // Field (3) 'ReceiptRoot' + if len(e.ReceiptRoot) != 32 { + err = ssz.ErrBytesLength + return + } + dst = append(dst, e.ReceiptRoot...) + + // Field (4) 'LogsBloom' + if len(e.LogsBloom) != 256 { + err = ssz.ErrBytesLength + return + } + dst = append(dst, e.LogsBloom...) + + // Field (5) 'Random' + if len(e.Random) != 32 { + err = ssz.ErrBytesLength + return + } + dst = append(dst, e.Random...) + + // Field (6) 'BlockNumber' + dst = ssz.MarshalUint64(dst, e.BlockNumber) + + // Field (7) 'GasLimit' + dst = ssz.MarshalUint64(dst, e.GasLimit) + + // Field (8) 'GasUsed' + dst = ssz.MarshalUint64(dst, e.GasUsed) + + // Field (9) 'Timestamp' + dst = ssz.MarshalUint64(dst, e.Timestamp) + + // Offset (10) 'ExtraData' + dst = ssz.WriteOffset(dst, offset) + offset += len(e.ExtraData) + + // Field (11) 'BaseFeePerGas' + if len(e.BaseFeePerGas) != 32 { + err = ssz.ErrBytesLength + return + } + dst = append(dst, e.BaseFeePerGas...) + + // Field (12) 'BlockHash' + if len(e.BlockHash) != 32 { + err = ssz.ErrBytesLength + return + } + dst = append(dst, e.BlockHash...) + + // Field (13) 'TransactionsRoot' + if len(e.TransactionsRoot) != 32 { + err = ssz.ErrBytesLength + return + } + dst = append(dst, e.TransactionsRoot...) + + // Field (10) 'ExtraData' + if len(e.ExtraData) > 32 { + err = ssz.ErrBytesLength + return + } + dst = append(dst, e.ExtraData...) 
+ + return +} + +// UnmarshalSSZ ssz unmarshals the ExecutionPayloadHeader object +func (e *ExecutionPayloadHeader) UnmarshalSSZ(buf []byte) error { + var err error + size := uint64(len(buf)) + if size < 536 { + return ssz.ErrSize + } + + tail := buf + var o10 uint64 + + // Field (0) 'ParentHash' + if cap(e.ParentHash) == 0 { + e.ParentHash = make([]byte, 0, len(buf[0:32])) + } + e.ParentHash = append(e.ParentHash, buf[0:32]...) + + // Field (1) 'FeeRecipient' + if cap(e.FeeRecipient) == 0 { + e.FeeRecipient = make([]byte, 0, len(buf[32:52])) + } + e.FeeRecipient = append(e.FeeRecipient, buf[32:52]...) + + // Field (2) 'StateRoot' + if cap(e.StateRoot) == 0 { + e.StateRoot = make([]byte, 0, len(buf[52:84])) + } + e.StateRoot = append(e.StateRoot, buf[52:84]...) + + // Field (3) 'ReceiptRoot' + if cap(e.ReceiptRoot) == 0 { + e.ReceiptRoot = make([]byte, 0, len(buf[84:116])) + } + e.ReceiptRoot = append(e.ReceiptRoot, buf[84:116]...) + + // Field (4) 'LogsBloom' + if cap(e.LogsBloom) == 0 { + e.LogsBloom = make([]byte, 0, len(buf[116:372])) + } + e.LogsBloom = append(e.LogsBloom, buf[116:372]...) + + // Field (5) 'Random' + if cap(e.Random) == 0 { + e.Random = make([]byte, 0, len(buf[372:404])) + } + e.Random = append(e.Random, buf[372:404]...) + + // Field (6) 'BlockNumber' + e.BlockNumber = ssz.UnmarshallUint64(buf[404:412]) + + // Field (7) 'GasLimit' + e.GasLimit = ssz.UnmarshallUint64(buf[412:420]) + + // Field (8) 'GasUsed' + e.GasUsed = ssz.UnmarshallUint64(buf[420:428]) + + // Field (9) 'Timestamp' + e.Timestamp = ssz.UnmarshallUint64(buf[428:436]) + + // Offset (10) 'ExtraData' + if o10 = ssz.ReadOffset(buf[436:440]); o10 > size { + return ssz.ErrOffset + } + + if o10 < 536 { + return ssz.ErrInvalidVariableOffset + } + + // Field (11) 'BaseFeePerGas' + if cap(e.BaseFeePerGas) == 0 { + e.BaseFeePerGas = make([]byte, 0, len(buf[440:472])) + } + e.BaseFeePerGas = append(e.BaseFeePerGas, buf[440:472]...) 
+ + // Field (12) 'BlockHash' + if cap(e.BlockHash) == 0 { + e.BlockHash = make([]byte, 0, len(buf[472:504])) + } + e.BlockHash = append(e.BlockHash, buf[472:504]...) + + // Field (13) 'TransactionsRoot' + if cap(e.TransactionsRoot) == 0 { + e.TransactionsRoot = make([]byte, 0, len(buf[504:536])) + } + e.TransactionsRoot = append(e.TransactionsRoot, buf[504:536]...) + + // Field (10) 'ExtraData' + { + buf = tail[o10:] + if len(buf) > 32 { + return ssz.ErrBytesLength + } + if cap(e.ExtraData) == 0 { + e.ExtraData = make([]byte, 0, len(buf)) + } + e.ExtraData = append(e.ExtraData, buf...) + } + return err +} + +// SizeSSZ returns the ssz encoded size in bytes for the ExecutionPayloadHeader object +func (e *ExecutionPayloadHeader) SizeSSZ() (size int) { + size = 536 + + // Field (10) 'ExtraData' + size += len(e.ExtraData) + + return +} + +// HashTreeRoot ssz hashes the ExecutionPayloadHeader object +func (e *ExecutionPayloadHeader) HashTreeRoot() ([32]byte, error) { + return ssz.HashWithDefaultHasher(e) +} + +// HashTreeRootWith ssz hashes the ExecutionPayloadHeader object with a hasher +func (e *ExecutionPayloadHeader) HashTreeRootWith(hh *ssz.Hasher) (err error) { + indx := hh.Index() + + // Field (0) 'ParentHash' + if len(e.ParentHash) != 32 { + err = ssz.ErrBytesLength + return + } + hh.PutBytes(e.ParentHash) + + // Field (1) 'FeeRecipient' + if len(e.FeeRecipient) != 20 { + err = ssz.ErrBytesLength + return + } + hh.PutBytes(e.FeeRecipient) + + // Field (2) 'StateRoot' + if len(e.StateRoot) != 32 { + err = ssz.ErrBytesLength + return + } + hh.PutBytes(e.StateRoot) + + // Field (3) 'ReceiptRoot' + if len(e.ReceiptRoot) != 32 { + err = ssz.ErrBytesLength + return + } + hh.PutBytes(e.ReceiptRoot) + + // Field (4) 'LogsBloom' + if len(e.LogsBloom) != 256 { + err = ssz.ErrBytesLength + return + } + hh.PutBytes(e.LogsBloom) + + // Field (5) 'Random' + if len(e.Random) != 32 { + err = ssz.ErrBytesLength + return + } + hh.PutBytes(e.Random) + + // Field (6) 'BlockNumber' 
+ hh.PutUint64(e.BlockNumber) + + // Field (7) 'GasLimit' + hh.PutUint64(e.GasLimit) + + // Field (8) 'GasUsed' + hh.PutUint64(e.GasUsed) + + // Field (9) 'Timestamp' + hh.PutUint64(e.Timestamp) + + // Field (10) 'ExtraData' + { + elemIndx := hh.Index() + byteLen := uint64(len(e.ExtraData)) + if byteLen > 32 { + err = ssz.ErrIncorrectListSize + return + } + hh.PutBytes(e.ExtraData) + hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32) + } + + // Field (11) 'BaseFeePerGas' + if len(e.BaseFeePerGas) != 32 { + err = ssz.ErrBytesLength + return + } + hh.PutBytes(e.BaseFeePerGas) + + // Field (12) 'BlockHash' + if len(e.BlockHash) != 32 { + err = ssz.ErrBytesLength + return + } + hh.PutBytes(e.BlockHash) + + // Field (13) 'TransactionsRoot' + if len(e.TransactionsRoot) != 32 { + err = ssz.ErrBytesLength + return + } + hh.PutBytes(e.TransactionsRoot) + + hh.Merkleize(indx) + return +} + // MarshalSSZ ssz marshals the Status object func (s *Status) MarshalSSZ() ([]byte, error) { return ssz.MarshalSSZ(s) From a2c1185032d2de8f47e8823d96dc986be04f03da Mon Sep 17 00:00:00 2001 From: Potuz Date: Tue, 23 Nov 2021 22:56:34 -0300 Subject: [PATCH 20/45] Monitor sync committee (#9923) * Add sync committeee contributions to monitor * gaz * Raul's review * Added lock around TrackedValidators * add comment to trackedIndex * add missing locks because of trackedIndex * Terence fixes 2 * moved TrackedValidator to service from config * Terence comment fix Co-authored-by: Raul Jordan --- beacon-chain/cache/sync_committee_test.go | 50 ++++-------- beacon-chain/monitor/BUILD.bazel | 3 + beacon-chain/monitor/metrics.go | 13 +++ beacon-chain/monitor/process_attestation.go | 21 +++-- .../monitor/process_attestation_test.go | 17 ++-- beacon-chain/monitor/process_block.go | 24 +++++- beacon-chain/monitor/process_block_test.go | 51 ++++++++---- beacon-chain/monitor/process_exit.go | 8 +- beacon-chain/monitor/process_exit_test.go | 32 +++----- .../monitor/process_sync_committee.go | 79 
+++++++++++++++++++ .../monitor/process_sync_committee_test.go | 59 ++++++++++++++ beacon-chain/monitor/service.go | 69 ++++++++++++---- beacon-chain/monitor/service_test.go | 38 +++++++-- testing/util/sync_committee.go | 20 +++++ 14 files changed, 374 insertions(+), 110 deletions(-) create mode 100644 beacon-chain/monitor/process_sync_committee.go create mode 100644 beacon-chain/monitor/process_sync_committee_test.go diff --git a/beacon-chain/cache/sync_committee_test.go b/beacon-chain/cache/sync_committee_test.go index 9dc95fc779..9b5d7a5b67 100644 --- a/beacon-chain/cache/sync_committee_test.go +++ b/beacon-chain/cache/sync_committee_test.go @@ -5,8 +5,6 @@ import ( types "github.com/prysmaticlabs/eth2-types" "github.com/prysmaticlabs/prysm/beacon-chain/cache" - "github.com/prysmaticlabs/prysm/config/params" - "github.com/prysmaticlabs/prysm/encoding/bytesutil" ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" "github.com/prysmaticlabs/prysm/testing/require" "github.com/prysmaticlabs/prysm/testing/util" @@ -28,10 +26,10 @@ func TestSyncCommitteeCache_CanUpdateAndRetrieve(t *testing.T) { }{ { name: "only current epoch", - currentSyncCommittee: convertToCommittee([][]byte{ + currentSyncCommittee: util.ConvertToCommittee([][]byte{ pubKeys[1], pubKeys[2], pubKeys[3], pubKeys[2], pubKeys[2], }), - nextSyncCommittee: convertToCommittee([][]byte{}), + nextSyncCommittee: util.ConvertToCommittee([][]byte{}), currentSyncMap: map[types.ValidatorIndex][]types.CommitteeIndex{ 1: {0}, 2: {1, 3, 4}, @@ -45,8 +43,8 @@ func TestSyncCommitteeCache_CanUpdateAndRetrieve(t *testing.T) { }, { name: "only next epoch", - currentSyncCommittee: convertToCommittee([][]byte{}), - nextSyncCommittee: convertToCommittee([][]byte{ + currentSyncCommittee: util.ConvertToCommittee([][]byte{}), + nextSyncCommittee: util.ConvertToCommittee([][]byte{ pubKeys[1], pubKeys[2], pubKeys[3], pubKeys[2], pubKeys[2], }), currentSyncMap: map[types.ValidatorIndex][]types.CommitteeIndex{ @@ -62,14 
+60,14 @@ func TestSyncCommitteeCache_CanUpdateAndRetrieve(t *testing.T) { }, { name: "some current epoch and some next epoch", - currentSyncCommittee: convertToCommittee([][]byte{ + currentSyncCommittee: util.ConvertToCommittee([][]byte{ pubKeys[1], pubKeys[2], pubKeys[3], pubKeys[2], pubKeys[2], }), - nextSyncCommittee: convertToCommittee([][]byte{ + nextSyncCommittee: util.ConvertToCommittee([][]byte{ pubKeys[7], pubKeys[6], pubKeys[5], @@ -90,14 +88,14 @@ func TestSyncCommitteeCache_CanUpdateAndRetrieve(t *testing.T) { }, { name: "some current epoch and some next epoch duplicated across", - currentSyncCommittee: convertToCommittee([][]byte{ + currentSyncCommittee: util.ConvertToCommittee([][]byte{ pubKeys[1], pubKeys[2], pubKeys[3], pubKeys[2], pubKeys[2], }), - nextSyncCommittee: convertToCommittee([][]byte{ + nextSyncCommittee: util.ConvertToCommittee([][]byte{ pubKeys[2], pubKeys[1], pubKeys[3], @@ -117,13 +115,13 @@ func TestSyncCommitteeCache_CanUpdateAndRetrieve(t *testing.T) { }, { name: "all duplicated", - currentSyncCommittee: convertToCommittee([][]byte{ + currentSyncCommittee: util.ConvertToCommittee([][]byte{ pubKeys[100], pubKeys[100], pubKeys[100], pubKeys[100], }), - nextSyncCommittee: convertToCommittee([][]byte{ + nextSyncCommittee: util.ConvertToCommittee([][]byte{ pubKeys[100], pubKeys[100], pubKeys[100], @@ -138,13 +136,13 @@ func TestSyncCommitteeCache_CanUpdateAndRetrieve(t *testing.T) { }, { name: "unknown keys", - currentSyncCommittee: convertToCommittee([][]byte{ + currentSyncCommittee: util.ConvertToCommittee([][]byte{ pubKeys[100], pubKeys[100], pubKeys[100], pubKeys[100], }), - nextSyncCommittee: convertToCommittee([][]byte{ + nextSyncCommittee: util.ConvertToCommittee([][]byte{ pubKeys[100], pubKeys[100], pubKeys[100], @@ -189,13 +187,13 @@ func TestSyncCommitteeCache_RootDoesNotExist(t *testing.T) { func TestSyncCommitteeCache_CanRotate(t *testing.T) { c := cache.NewSyncCommittee() s, _ := util.DeterministicGenesisStateAltair(t, 
64) - require.NoError(t, s.SetCurrentSyncCommittee(convertToCommittee([][]byte{{1}}))) + require.NoError(t, s.SetCurrentSyncCommittee(util.ConvertToCommittee([][]byte{{1}}))) require.NoError(t, c.UpdatePositionsInCommittee([32]byte{'a'}, s)) - require.NoError(t, s.SetCurrentSyncCommittee(convertToCommittee([][]byte{{2}}))) + require.NoError(t, s.SetCurrentSyncCommittee(util.ConvertToCommittee([][]byte{{2}}))) require.NoError(t, c.UpdatePositionsInCommittee([32]byte{'b'}, s)) - require.NoError(t, s.SetCurrentSyncCommittee(convertToCommittee([][]byte{{3}}))) + require.NoError(t, s.SetCurrentSyncCommittee(util.ConvertToCommittee([][]byte{{3}}))) require.NoError(t, c.UpdatePositionsInCommittee([32]byte{'c'}, s)) - require.NoError(t, s.SetCurrentSyncCommittee(convertToCommittee([][]byte{{4}}))) + require.NoError(t, s.SetCurrentSyncCommittee(util.ConvertToCommittee([][]byte{{4}}))) require.NoError(t, c.UpdatePositionsInCommittee([32]byte{'d'}, s)) _, err := c.CurrentPeriodIndexPosition([32]byte{'a'}, 0) @@ -204,19 +202,3 @@ func TestSyncCommitteeCache_CanRotate(t *testing.T) { _, err = c.CurrentPeriodIndexPosition([32]byte{'c'}, 0) require.NoError(t, err) } - -func convertToCommittee(inputKeys [][]byte) *ethpb.SyncCommittee { - var pubKeys [][]byte - for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSize; i++ { - if i < uint64(len(inputKeys)) { - pubKeys = append(pubKeys, bytesutil.PadTo(inputKeys[i], params.BeaconConfig().BLSPubkeyLength)) - } else { - pubKeys = append(pubKeys, bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength)) - } - } - - return ðpb.SyncCommittee{ - Pubkeys: pubKeys, - AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength), - } -} diff --git a/beacon-chain/monitor/BUILD.bazel b/beacon-chain/monitor/BUILD.bazel index b46e1ef767..a1fdf5be85 100644 --- a/beacon-chain/monitor/BUILD.bazel +++ b/beacon-chain/monitor/BUILD.bazel @@ -8,6 +8,7 @@ go_library( "process_attestation.go", "process_block.go", 
"process_exit.go", + "process_sync_committee.go", "service.go", ], importpath = "github.com/prysmaticlabs/prysm/beacon-chain/monitor", @@ -38,10 +39,12 @@ go_test( "process_attestation_test.go", "process_block_test.go", "process_exit_test.go", + "process_sync_committee_test.go", "service_test.go", ], embed = [":go_default_library"], deps = [ + "//beacon-chain/core/altair:go_default_library", "//beacon-chain/db/testing:go_default_library", "//beacon-chain/state/stategen:go_default_library", "//config/params:go_default_library", diff --git a/beacon-chain/monitor/metrics.go b/beacon-chain/monitor/metrics.go index 61d50a1cbb..a45fb5c983 100644 --- a/beacon-chain/monitor/metrics.go +++ b/beacon-chain/monitor/metrics.go @@ -55,6 +55,7 @@ var ( "validator_index", }, ) + // proposedSlotsCounter used to track proposed blocks proposedSlotsCounter = promauto.NewCounterVec( prometheus.CounterOpts{ @@ -77,4 +78,16 @@ var ( "validator_index", }, ) + // syncCommitteeContributionCounter used to track sync committee + // contributions + syncCommitteeContributionCounter = promauto.NewCounterVec( + prometheus.CounterOpts{ + Namespace: "monitor", + Name: "sync_committee_contributions_total", + Help: "Number of Sync committee contributions performed", + }, + []string{ + "validator_index", + }, + ) ) diff --git a/beacon-chain/monitor/process_attestation.go b/beacon-chain/monitor/process_attestation.go index 364bae5d79..77a2f9dae7 100644 --- a/beacon-chain/monitor/process_attestation.go +++ b/beacon-chain/monitor/process_attestation.go @@ -20,12 +20,13 @@ import ( // updatedPerformanceFromTrackedVal returns true if the validator is tracked and if the // given slot is different than the last attested slot from this validator. +// It assumes that a read lock is held on the monitor service. 
func (s *Service) updatedPerformanceFromTrackedVal(idx types.ValidatorIndex, slot types.Slot) bool { - if !s.TrackedIndex(types.ValidatorIndex(idx)) { + if !s.trackedIndex(idx) { return false } - if lp, ok := s.latestPerformance[types.ValidatorIndex(idx)]; ok { + if lp, ok := s.latestPerformance[idx]; ok { return lp.attestedSlot != slot } return false @@ -73,6 +74,8 @@ func (s *Service) processIncludedAttestation(ctx context.Context, state state.Be log.WithError(err).Error("Could not get attesting indices") return } + s.Lock() + defer s.Unlock() for _, idx := range attestingIndices { if s.updatedPerformanceFromTrackedVal(types.ValidatorIndex(idx), att.Data.Slot) { logFields := logMessageTimelyFlagsForIndex(types.ValidatorIndex(idx), att.Data) @@ -87,7 +90,7 @@ func (s *Service) processIncludedAttestation(ctx context.Context, state state.Be aggregatedPerf.totalRequestedCount++ latestPerf := s.latestPerformance[types.ValidatorIndex(idx)] - balanceChg := balance - latestPerf.balance + balanceChg := int64(balance - latestPerf.balance) latestPerf.balanceChange = balanceChg latestPerf.balance = balance latestPerf.attestedSlot = att.Data.Slot @@ -165,10 +168,13 @@ func (s *Service) processIncludedAttestation(ctx context.Context, state state.Be // processUnaggregatedAttestation logs when the beacon node sees an unaggregated attestation from one of our // tracked validators func (s *Service) processUnaggregatedAttestation(ctx context.Context, att *ethpb.Attestation) { + s.RLock() + defer s.RUnlock() root := bytesutil.ToBytes32(att.Data.BeaconBlockRoot) state := s.config.StateGen.StateByRootIfCachedNoCopy(root) if state == nil { - log.Debug("Skipping unaggregated attestation due to state not found in cache") + log.WithField("BeaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug( + "Skipping unaggregated attestation due to state not found in cache") return } attestingIndices, err := attestingIndices(ctx, state, att) @@ -187,7 +193,9 @@ func (s *Service) 
processUnaggregatedAttestation(ctx context.Context, att *ethpb // processAggregatedAttestation logs when we see an aggregation from one of our tracked validators or an aggregated // attestation from one of our tracked validators func (s *Service) processAggregatedAttestation(ctx context.Context, att *ethpb.AggregateAttestationAndProof) { - if s.TrackedIndex(att.AggregatorIndex) { + s.Lock() + defer s.Unlock() + if s.trackedIndex(att.AggregatorIndex) { log.WithFields(logrus.Fields{ "ValidatorIndex": att.AggregatorIndex, }).Info("Processed attestation aggregation") @@ -201,7 +209,8 @@ func (s *Service) processAggregatedAttestation(ctx context.Context, att *ethpb.A copy(root[:], att.Aggregate.Data.BeaconBlockRoot) state := s.config.StateGen.StateByRootIfCachedNoCopy(root) if state == nil { - log.Debug("Skipping agregated attestation due to state not found in cache") + log.WithField("BeaconBlockRoot", fmt.Sprintf("%#x", bytesutil.Trunc(root[:]))).Debug( + "Skipping agregated attestation due to state not found in cache") return } attestingIndices, err := attestingIndices(ctx, state, att.Aggregate) diff --git a/beacon-chain/monitor/process_attestation_test.go b/beacon-chain/monitor/process_attestation_test.go index d991992a66..0ff19e2ba1 100644 --- a/beacon-chain/monitor/process_attestation_test.go +++ b/beacon-chain/monitor/process_attestation_test.go @@ -41,21 +41,26 @@ func setupService(t *testing.T) *Service { balance: 31900000000, }, } - aggregatedPerformance := map[types.ValidatorIndex]ValidatorAggregatedPerformance{ 1: {}, 2: {}, 12: {}, 15: {}, } - + trackedSyncCommitteeIndices := map[types.ValidatorIndex][]types.CommitteeIndex{ + 1: {0, 1, 2, 3}, + 12: {4, 5}, + } return &Service{ config: &ValidatorMonitorConfig{ - StateGen: stategen.New(beaconDB), - TrackedValidators: trackedVals, + StateGen: stategen.New(beaconDB), }, - latestPerformance: latestPerformance, - aggregatedPerformance: aggregatedPerformance, + + TrackedValidators: trackedVals, + latestPerformance: 
latestPerformance, + aggregatedPerformance: aggregatedPerformance, + trackedSyncCommitteeIndices: trackedSyncCommitteeIndices, + lastSyncedEpoch: 0, } } diff --git a/beacon-chain/monitor/process_block.go b/beacon-chain/monitor/process_block.go index b2edb8bf67..c2aac89377 100644 --- a/beacon-chain/monitor/process_block.go +++ b/beacon-chain/monitor/process_block.go @@ -9,6 +9,7 @@ import ( "github.com/prysmaticlabs/prysm/beacon-chain/state" "github.com/prysmaticlabs/prysm/encoding/bytesutil" "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block" + "github.com/prysmaticlabs/prysm/time/slots" "github.com/sirupsen/logrus" ) @@ -39,13 +40,26 @@ func (s *Service) processBlock(ctx context.Context, b block.SignedBeaconBlock) { return } + currEpoch := slots.ToEpoch(blk.Slot()) + s.RLock() + lastSyncedEpoch := s.lastSyncedEpoch + s.RUnlock() + + if currEpoch != lastSyncedEpoch && + slots.SyncCommitteePeriod(currEpoch) == slots.SyncCommitteePeriod(lastSyncedEpoch) { + s.updateSyncCommitteeTrackedVals(state) + } + + s.processSyncAggregate(state, blk) s.processProposedBlock(state, root, blk) s.processAttestations(ctx, state, blk) } // processProposedBlock logs the event that one of our tracked validators proposed a block that was included func (s *Service) processProposedBlock(state state.BeaconState, root [32]byte, blk block.BeaconBlock) { - if s.TrackedIndex(blk.ProposerIndex()) { + s.Lock() + defer s.Unlock() + if s.trackedIndex(blk.ProposerIndex()) { // update metrics proposedSlotsCounter.WithLabelValues(fmt.Sprintf("%d", blk.ProposerIndex())).Inc() @@ -57,7 +71,7 @@ func (s *Service) processProposedBlock(state state.BeaconState, root [32]byte, b } latestPerf := s.latestPerformance[blk.ProposerIndex()] - balanceChg := balance - latestPerf.balance + balanceChg := int64(balance - latestPerf.balance) latestPerf.balanceChange = balanceChg latestPerf.balance = balance s.latestPerformance[blk.ProposerIndex()] = latestPerf @@ -80,9 +94,11 @@ func (s *Service) 
processProposedBlock(state state.BeaconState, root [32]byte, b // processSlashings logs the event of one of our tracked validators was slashed func (s *Service) processSlashings(blk block.BeaconBlock) { + s.RLock() + defer s.RUnlock() for _, slashing := range blk.Body().ProposerSlashings() { idx := slashing.Header_1.Header.ProposerIndex - if s.TrackedIndex(idx) { + if s.trackedIndex(idx) { log.WithFields(logrus.Fields{ "ProposerIndex": idx, "Slot:": blk.Slot(), @@ -95,7 +111,7 @@ func (s *Service) processSlashings(blk block.BeaconBlock) { for _, slashing := range blk.Body().AttesterSlashings() { for _, idx := range blocks.SlashableAttesterIndices(slashing) { - if s.TrackedIndex(types.ValidatorIndex(idx)) { + if s.trackedIndex(types.ValidatorIndex(idx)) { log.WithFields(logrus.Fields{ "AttesterIndex": idx, "Slot:": blk.Slot(), diff --git a/beacon-chain/monitor/process_block_test.go b/beacon-chain/monitor/process_block_test.go index 5c9c3def08..73d31108f0 100644 --- a/beacon-chain/monitor/process_block_test.go +++ b/beacon-chain/monitor/process_block_test.go @@ -6,6 +6,7 @@ import ( "testing" types "github.com/prysmaticlabs/eth2-types" + "github.com/prysmaticlabs/prysm/beacon-chain/core/altair" "github.com/prysmaticlabs/prysm/config/params" "github.com/prysmaticlabs/prysm/encoding/bytesutil" ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" @@ -116,11 +117,9 @@ func TestProcessSlashings(t *testing.T) { t.Run(tt.name, func(t *testing.T) { hook := logTest.NewGlobal() s := &Service{ - config: &ValidatorMonitorConfig{ - TrackedValidators: map[types.ValidatorIndex]interface{}{ - 1: nil, - 2: nil, - }, + TrackedValidators: map[types.ValidatorIndex]interface{}{ + 1: nil, + 2: nil, }, } s.processSlashings(wrapper.WrappedPhase0BeaconBlock(tt.block)) @@ -178,31 +177,55 @@ func TestProcessProposedBlock(t *testing.T) { } -func TestProcessBlock_ProposerAndSlashedTrackedVals(t *testing.T) { +func TestProcessBlock_AllEventsTrackedVals(t *testing.T) { hook := 
logTest.NewGlobal() ctx := context.Background() - s := setupService(t) - genesis, keys := util.DeterministicGenesisState(t, 64) + + genesis, keys := util.DeterministicGenesisStateAltair(t, 64) + c, err := altair.NextSyncCommittee(ctx, genesis) + require.NoError(t, err) + require.NoError(t, genesis.SetCurrentSyncCommittee(c)) + genConfig := util.DefaultBlockGenConfig() genConfig.NumProposerSlashings = 1 - b, err := util.GenerateFullBlock(genesis, keys, genConfig, 1) + b, err := util.GenerateFullBlockAltair(genesis, keys, genConfig, 1) + require.NoError(t, err) + s := setupService(t) + + pubKeys := make([][]byte, 3) + pubKeys[0] = genesis.Validators()[0].PublicKey + pubKeys[1] = genesis.Validators()[1].PublicKey + pubKeys[2] = genesis.Validators()[2].PublicKey + + currentSyncCommittee := util.ConvertToCommittee([][]byte{ + pubKeys[0], pubKeys[1], pubKeys[2], pubKeys[1], pubKeys[1], + }) + require.NoError(t, genesis.SetCurrentSyncCommittee(currentSyncCommittee)) + idx := b.Block.Body.ProposerSlashings[0].Header_1.Header.ProposerIndex - if !s.TrackedIndex(idx) { - s.config.TrackedValidators[idx] = nil + s.RLock() + if !s.trackedIndex(idx) { + s.TrackedValidators[idx] = nil s.latestPerformance[idx] = ValidatorLatestPerformance{ balance: 31900000000, } s.aggregatedPerformance[idx] = ValidatorAggregatedPerformance{} } + s.RUnlock() + s.updateSyncCommitteeTrackedVals(genesis) - require.NoError(t, err) root, err := b.GetBlock().HashTreeRoot() require.NoError(t, err) require.NoError(t, s.config.StateGen.SaveState(ctx, root, genesis)) - wanted1 := fmt.Sprintf("\"Proposed block was included\" BalanceChange=100000000 BlockRoot=%#x NewBalance=32000000000 ParentRoot=0x67a9fe4d0d8d ProposerIndex=15 Slot=1 Version=0 prefix=monitor", bytesutil.Trunc(root[:])) + wanted1 := fmt.Sprintf("\"Proposed block was included\" BalanceChange=100000000 BlockRoot=%#x NewBalance=32000000000 ParentRoot=0xf732eaeb7fae ProposerIndex=15 Slot=1 Version=1 prefix=monitor", bytesutil.Trunc(root[:])) 
wanted2 := fmt.Sprintf("\"Proposer slashing was included\" ProposerIndex=%d Root1=0x000100000000 Root2=0x000200000000 SlashingSlot=0 Slot:=1 prefix=monitor", idx) - wrapped := wrapper.WrappedPhase0SignedBeaconBlock(b) + wanted3 := "\"Sync committee contribution included\" BalanceChange=0 Contributions=3 ExpectedContrib=3 NewBalance=32000000000 ValidatorIndex=1 prefix=monitor" + wanted4 := "\"Sync committee contribution included\" BalanceChange=0 Contributions=1 ExpectedContrib=1 NewBalance=32000000000 ValidatorIndex=2 prefix=monitor" + wrapped, err := wrapper.WrappedAltairSignedBeaconBlock(b) + require.NoError(t, err) s.processBlock(ctx, wrapped) require.LogsContain(t, hook, wanted1) require.LogsContain(t, hook, wanted2) + require.LogsContain(t, hook, wanted3) + require.LogsContain(t, hook, wanted4) } diff --git a/beacon-chain/monitor/process_exit.go b/beacon-chain/monitor/process_exit.go index b0b47ca705..20798c49ed 100644 --- a/beacon-chain/monitor/process_exit.go +++ b/beacon-chain/monitor/process_exit.go @@ -9,9 +9,11 @@ import ( // processExitsFromBlock logs the event of one of our tracked validators' exit was // included in a block func (s *Service) processExitsFromBlock(blk block.BeaconBlock) { + s.RLock() + defer s.RUnlock() for _, exit := range blk.Body().VoluntaryExits() { idx := exit.Exit.ValidatorIndex - if s.TrackedIndex(idx) { + if s.trackedIndex(idx) { log.WithFields(logrus.Fields{ "ValidatorIndex": idx, "Slot": blk.Slot(), @@ -23,7 +25,9 @@ func (s *Service) processExitsFromBlock(blk block.BeaconBlock) { // processExit logs the event of one of our tracked validators' exit was processed func (s *Service) processExit(exit *ethpb.SignedVoluntaryExit) { idx := exit.Exit.ValidatorIndex - if s.TrackedIndex(idx) { + s.RLock() + defer s.RUnlock() + if s.trackedIndex(idx) { log.WithFields(logrus.Fields{ "ValidatorIndex": idx, }).Info("Voluntary exit was processed") diff --git a/beacon-chain/monitor/process_exit_test.go 
b/beacon-chain/monitor/process_exit_test.go index 4c03782915..09f7b6e371 100644 --- a/beacon-chain/monitor/process_exit_test.go +++ b/beacon-chain/monitor/process_exit_test.go @@ -13,11 +13,9 @@ import ( func TestProcessExitsFromBlockTrackedIndices(t *testing.T) { hook := logTest.NewGlobal() s := &Service{ - config: &ValidatorMonitorConfig{ - TrackedValidators: map[types.ValidatorIndex]interface{}{ - 1: nil, - 2: nil, - }, + TrackedValidators: map[types.ValidatorIndex]interface{}{ + 1: nil, + 2: nil, }, } @@ -49,11 +47,9 @@ func TestProcessExitsFromBlockTrackedIndices(t *testing.T) { func TestProcessExitsFromBlockUntrackedIndices(t *testing.T) { hook := logTest.NewGlobal() s := &Service{ - config: &ValidatorMonitorConfig{ - TrackedValidators: map[types.ValidatorIndex]interface{}{ - 1: nil, - 2: nil, - }, + TrackedValidators: map[types.ValidatorIndex]interface{}{ + 1: nil, + 2: nil, }, } @@ -85,11 +81,9 @@ func TestProcessExitsFromBlockUntrackedIndices(t *testing.T) { func TestProcessExitP2PTrackedIndices(t *testing.T) { hook := logTest.NewGlobal() s := &Service{ - config: &ValidatorMonitorConfig{ - TrackedValidators: map[types.ValidatorIndex]interface{}{ - 1: nil, - 2: nil, - }, + TrackedValidators: map[types.ValidatorIndex]interface{}{ + 1: nil, + 2: nil, }, } @@ -107,11 +101,9 @@ func TestProcessExitP2PTrackedIndices(t *testing.T) { func TestProcessExitP2PUntrackedIndices(t *testing.T) { hook := logTest.NewGlobal() s := &Service{ - config: &ValidatorMonitorConfig{ - TrackedValidators: map[types.ValidatorIndex]interface{}{ - 1: nil, - 2: nil, - }, + TrackedValidators: map[types.ValidatorIndex]interface{}{ + 1: nil, + 2: nil, }, } diff --git a/beacon-chain/monitor/process_sync_committee.go b/beacon-chain/monitor/process_sync_committee.go new file mode 100644 index 0000000000..1c82558df0 --- /dev/null +++ b/beacon-chain/monitor/process_sync_committee.go @@ -0,0 +1,79 @@ +package monitor + +import ( + "fmt" + + "github.com/prysmaticlabs/prysm/beacon-chain/state" + 
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" + "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block" + "github.com/sirupsen/logrus" +) + +// processSyncCommitteeContribution logs the event that one of our tracked +// validators' aggregated sync contribution has been processed. +// TODO: We do not log if a sync contribution was included in an aggregate (we +// log them when they are included in blocks) +func (s *Service) processSyncCommitteeContribution(contribution *ethpb.SignedContributionAndProof) { + idx := contribution.Message.AggregatorIndex + s.Lock() + defer s.Unlock() + if s.trackedIndex(idx) { + aggPerf := s.aggregatedPerformance[idx] + aggPerf.totalSyncComitteeAggregations++ + s.aggregatedPerformance[idx] = aggPerf + + log.WithField("ValidatorIndex", contribution.Message.AggregatorIndex).Info("Sync committee aggregation processed") + } +} + +// processSyncAggregate logs the event that one of our tracked validators is a sync-committee member and its +// contribution was included +func (s *Service) processSyncAggregate(state state.BeaconState, blk block.BeaconBlock) { + if blk == nil || blk.Body() == nil { + return + } + bits, err := blk.Body().SyncAggregate() + if err != nil { + log.WithError(err).Error("Cannot get SyncAggregate") + return + } + s.Lock() + defer s.Unlock() + for validatorIdx, committeeIndices := range s.trackedSyncCommitteeIndices { + if len(committeeIndices) > 0 { + contrib := 0 + for _, idx := range committeeIndices { + if bits.SyncCommitteeBits.BitAt(uint64(idx)) { + contrib++ + } + } + + balance, err := state.BalanceAtIndex(validatorIdx) + if err != nil { + log.Error("Could not get balance") + return + } + + latestPerf := s.latestPerformance[validatorIdx] + balanceChg := int64(balance - latestPerf.balance) + latestPerf.balanceChange = balanceChg + latestPerf.balance = balance + s.latestPerformance[validatorIdx] = latestPerf + + aggPerf := s.aggregatedPerformance[validatorIdx] + 
aggPerf.totalSyncComitteeContributions += uint64(contrib) + s.aggregatedPerformance[validatorIdx] = aggPerf + + syncCommitteeContributionCounter.WithLabelValues( + fmt.Sprintf("%d", validatorIdx)).Add(float64(contrib)) + + log.WithFields(logrus.Fields{ + "ValidatorIndex": validatorIdx, + "ExpectedContrib": len(committeeIndices), + "Contributions": contrib, + "NewBalance": balance, + "BalanceChange": balanceChg, + }).Info("Sync committee contribution included") + } + } +} diff --git a/beacon-chain/monitor/process_sync_committee_test.go b/beacon-chain/monitor/process_sync_committee_test.go new file mode 100644 index 0000000000..a7a33da1f6 --- /dev/null +++ b/beacon-chain/monitor/process_sync_committee_test.go @@ -0,0 +1,59 @@ +package monitor + +import ( + "testing" + + "github.com/prysmaticlabs/go-bitfield" + ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" + "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper" + "github.com/prysmaticlabs/prysm/testing/require" + "github.com/prysmaticlabs/prysm/testing/util" + logTest "github.com/sirupsen/logrus/hooks/test" +) + +func TestProcessSyncCommitteeContribution(t *testing.T) { + hook := logTest.NewGlobal() + s := setupService(t) + + contrib := ðpb.SignedContributionAndProof{ + Message: ðpb.ContributionAndProof{ + AggregatorIndex: 1, + }, + } + + s.processSyncCommitteeContribution(contrib) + require.LogsContain(t, hook, "\"Sync committee aggregation processed\" ValidatorIndex=1") + require.LogsDoNotContain(t, hook, "ValidatorIndex=2") +} + +func TestProcessSyncAggregate(t *testing.T) { + hook := logTest.NewGlobal() + s := setupService(t) + beaconState, _ := util.DeterministicGenesisStateAltair(t, 256) + + block := ðpb.BeaconBlockAltair{ + Slot: 2, + Body: ðpb.BeaconBlockBodyAltair{ + SyncAggregate: ðpb.SyncAggregate{ + SyncCommitteeBits: bitfield.Bitvector512{ + 0x31, 0xff, 0xff, 0xff, 0xff, 0x3f, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 
0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, + }, + }, + }, + } + + wrappedBlock, err := wrapper.WrappedAltairBeaconBlock(block) + require.NoError(t, err) + + s.processSyncAggregate(beaconState, wrappedBlock) + require.LogsContain(t, hook, "\"Sync committee contribution included\" BalanceChange=0 Contributions=1 ExpectedContrib=4 NewBalance=32000000000 ValidatorIndex=1 prefix=monitor") + require.LogsContain(t, hook, "\"Sync committee contribution included\" BalanceChange=100000000 Contributions=2 ExpectedContrib=2 NewBalance=32000000000 ValidatorIndex=12 prefix=monitor") + require.LogsDoNotContain(t, hook, "ValidatorIndex=2") +} diff --git a/beacon-chain/monitor/service.go b/beacon-chain/monitor/service.go index 1e9c1c6f6c..c04d32103d 100644 --- a/beacon-chain/monitor/service.go +++ b/beacon-chain/monitor/service.go @@ -1,8 +1,13 @@ package monitor import ( + "sync" + types "github.com/prysmaticlabs/eth2-types" + "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" + "github.com/prysmaticlabs/prysm/beacon-chain/state" "github.com/prysmaticlabs/prysm/beacon-chain/state/stategen" + "github.com/prysmaticlabs/prysm/time/slots" ) // ValidatorLatestPerformance keeps track of the latest participation of the validator @@ -13,41 +18,71 @@ type ValidatorLatestPerformance struct { timelyTarget bool timelyHead bool balance uint64 - balanceChange uint64 + balanceChange int64 } // ValidatorAggregatedPerformance keeps track of the accumulated performance of // the validator since launch type ValidatorAggregatedPerformance struct { - totalAttestedCount uint64 - totalRequestedCount uint64 - totalDistance uint64 - totalCorrectSource uint64 - totalCorrectTarget uint64 - totalCorrectHead uint64 - totalProposedCount uint64 - totalAggregations uint64 + totalAttestedCount 
uint64 + totalRequestedCount uint64 + totalDistance uint64 + totalCorrectSource uint64 + totalCorrectTarget uint64 + totalCorrectHead uint64 + totalProposedCount uint64 + totalAggregations uint64 + totalSyncComitteeContributions uint64 + totalSyncComitteeAggregations uint64 } // ValidatorMonitorConfig contains the list of validator indices that the // monitor service tracks, as well as the event feed notifier that the // monitor needs to subscribe. type ValidatorMonitorConfig struct { - StateGen stategen.StateManager - TrackedValidators map[types.ValidatorIndex]interface{} + StateGen stategen.StateManager } // Service is the main structure that tracks validators and reports logs and // metrics of their performances throughout their lifetime. type Service struct { - config *ValidatorMonitorConfig - latestPerformance map[types.ValidatorIndex]ValidatorLatestPerformance - aggregatedPerformance map[types.ValidatorIndex]ValidatorAggregatedPerformance + config *ValidatorMonitorConfig + + // Locks access to TrackedValidators, latestPerformance, aggregatedPerformance, + // trackedSyncedCommitteeIndices and lastSyncedEpoch + sync.RWMutex + + TrackedValidators map[types.ValidatorIndex]interface{} + latestPerformance map[types.ValidatorIndex]ValidatorLatestPerformance + aggregatedPerformance map[types.ValidatorIndex]ValidatorAggregatedPerformance + trackedSyncCommitteeIndices map[types.ValidatorIndex][]types.CommitteeIndex + lastSyncedEpoch types.Epoch } // TrackedIndex returns if the given validator index corresponds to one of the -// validators we follow -func (s *Service) TrackedIndex(idx types.ValidatorIndex) bool { - _, ok := s.config.TrackedValidators[idx] +// validators we follow. +// It assumes the caller holds the service Lock +func (s *Service) trackedIndex(idx types.ValidatorIndex) bool { + _, ok := s.TrackedValidators[idx] return ok } + +// updateSyncCommitteeTrackedVals updates the sync committee assignments of our +// tracked validators. 
It gets called when we sync a block after the Sync Period changes. +func (s *Service) updateSyncCommitteeTrackedVals(state state.BeaconState) { + s.Lock() + defer s.Unlock() + for idx := range s.TrackedValidators { + syncIdx, err := helpers.CurrentPeriodSyncSubcommitteeIndices(state, idx) + if err != nil { + log.WithError(err).WithField("ValidatorIndex", idx).Error( + "Sync committee assignments will not be reported") + delete(s.trackedSyncCommitteeIndices, idx) + } else if len(syncIdx) == 0 { + delete(s.trackedSyncCommitteeIndices, idx) + } else { + s.trackedSyncCommitteeIndices[idx] = syncIdx + } + } + s.lastSyncedEpoch = slots.ToEpoch(state.Slot()) +} diff --git a/beacon-chain/monitor/service_test.go b/beacon-chain/monitor/service_test.go index eaf231f1c5..8c715cdc31 100644 --- a/beacon-chain/monitor/service_test.go +++ b/beacon-chain/monitor/service_test.go @@ -5,17 +5,41 @@ import ( types "github.com/prysmaticlabs/eth2-types" "github.com/prysmaticlabs/prysm/testing/require" + "github.com/prysmaticlabs/prysm/testing/util" + logTest "github.com/sirupsen/logrus/hooks/test" ) func TestTrackedIndex(t *testing.T) { s := &Service{ - config: &ValidatorMonitorConfig{ - TrackedValidators: map[types.ValidatorIndex]interface{}{ - 1: nil, - 2: nil, - }, + TrackedValidators: map[types.ValidatorIndex]interface{}{ + 1: nil, + 2: nil, }, } - require.Equal(t, s.TrackedIndex(types.ValidatorIndex(1)), true) - require.Equal(t, s.TrackedIndex(types.ValidatorIndex(3)), false) + require.Equal(t, s.trackedIndex(types.ValidatorIndex(1)), true) + require.Equal(t, s.trackedIndex(types.ValidatorIndex(3)), false) +} + +func TestUpdateSyncCommitteeTrackedVals(t *testing.T) { + hook := logTest.NewGlobal() + s := setupService(t) + state, _ := util.DeterministicGenesisStateAltair(t, 1024) + + pubKeys := make([][]byte, 3) + pubKeys[0] = state.Validators()[0].PublicKey + pubKeys[1] = state.Validators()[1].PublicKey + pubKeys[2] = state.Validators()[2].PublicKey + + currentSyncCommittee := 
util.ConvertToCommittee([][]byte{ + pubKeys[0], pubKeys[1], pubKeys[2], pubKeys[1], pubKeys[1], + }) + require.NoError(t, state.SetCurrentSyncCommittee(currentSyncCommittee)) + + s.updateSyncCommitteeTrackedVals(state) + require.LogsDoNotContain(t, hook, "Sync committee assignments will not be reported") + newTrackedSyncIndices := map[types.ValidatorIndex][]types.CommitteeIndex{ + 1: {1, 3, 4}, + 2: {2}, + } + require.DeepEqual(t, s.trackedSyncCommitteeIndices, newTrackedSyncIndices) } diff --git a/testing/util/sync_committee.go b/testing/util/sync_committee.go index 3f63d0cb16..6bf56ba7d9 100644 --- a/testing/util/sync_committee.go +++ b/testing/util/sync_committee.go @@ -1,6 +1,8 @@ package util import ( + "github.com/prysmaticlabs/prysm/config/params" + "github.com/prysmaticlabs/prysm/encoding/bytesutil" ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" ) @@ -14,3 +16,21 @@ func HydrateSyncCommittee(s *ethpb.SyncCommitteeMessage) *ethpb.SyncCommitteeMes } return s } + +// ConvertToCommittee takes a list of pubkeys and returns a SyncCommittee with +// these keys as members. 
Some keys may appear repeated +func ConvertToCommittee(inputKeys [][]byte) *ethpb.SyncCommittee { + var pubKeys [][]byte + for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSize; i++ { + if i < uint64(len(inputKeys)) { + pubKeys = append(pubKeys, bytesutil.PadTo(inputKeys[i], params.BeaconConfig().BLSPubkeyLength)) + } else { + pubKeys = append(pubKeys, bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength)) + } + } + + return ðpb.SyncCommittee{ + Pubkeys: pubKeys, + AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength), + } +} From 1d53fd2fd32f652dbf0c8b4d50ee49f03805c5d8 Mon Sep 17 00:00:00 2001 From: Nishant Das Date: Wed, 24 Nov 2021 23:09:15 +0800 Subject: [PATCH 21/45] revert change (#9931) --- beacon-chain/sync/validate_aggregate_proof.go | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/beacon-chain/sync/validate_aggregate_proof.go b/beacon-chain/sync/validate_aggregate_proof.go index fdb5d2546b..40b8ec2d40 100644 --- a/beacon-chain/sync/validate_aggregate_proof.go +++ b/beacon-chain/sync/validate_aggregate_proof.go @@ -93,6 +93,15 @@ func (s *Service) validateAggregateAndProof(ctx context.Context, pid peer.ID, ms return pubsub.ValidationReject, errors.New("bad block referenced in attestation data") } + // Verify aggregate attestation has not already been seen via aggregate gossip, within a block, or through the creation locally. 
+ seen, err := s.cfg.attPool.HasAggregatedAttestation(m.Message.Aggregate) + if err != nil { + tracing.AnnotateError(span, err) + return pubsub.ValidationIgnore, err + } + if seen { + return pubsub.ValidationIgnore, nil + } if !s.validateBlockInAttestation(ctx, m) { return pubsub.ValidationIgnore, nil } From 4ae75138353130e8b90426c126d9fc0d45abfc3c Mon Sep 17 00:00:00 2001 From: Raul Jordan Date: Wed, 24 Nov 2021 10:40:49 -0500 Subject: [PATCH 22/45] Import Keystores Standard API Implementation (#9924) * begin * rem deleted code * delete keystores all tests * surface errors to user * add in changes * del * tests * slice * begin import process * add import keystores logic * unit tests for import * tests for all import keystores keymanager issues * change proto * pbs * renaming works * use proper request * pb * comment * gaz * fix up cli cmd * test * add gw * precond * tests * radek comments --- proto/eth/service/key_management.pb.go | 16 +- proto/eth/service/key_management.pb.gw.go | 4 +- proto/eth/service/key_management.proto | 6 +- .../v1alpha1/validator-client/web_api.pb.go | 707 +++++++++--------- .../validator-client/web_api.pb.gw.go | 32 +- .../v1alpha1/validator-client/web_api.proto | 6 +- validator/accounts/accounts_import.go | 45 +- validator/accounts/accounts_import_test.go | 27 - validator/accounts/accounts_list_test.go | 3 +- validator/keymanager/derived/keymanager.go | 7 + validator/keymanager/imported/BUILD.bazel | 1 - validator/keymanager/imported/delete.go | 3 + validator/keymanager/imported/delete_test.go | 3 +- validator/keymanager/imported/import.go | 71 +- validator/keymanager/imported/import_test.go | 143 ++-- .../keymanager/imported/keymanager_test.go | 3 +- validator/keymanager/types.go | 4 +- validator/keymanager/types_test.go | 1 + validator/rpc/server.go | 2 + validator/rpc/standard_api.go | 36 + validator/rpc/standard_api_test.go | 129 ++++ validator/rpc/wallet.go | 22 +- validator/rpc/wallet_test.go | 47 +- 23 files changed, 771 
insertions(+), 547 deletions(-) diff --git a/proto/eth/service/key_management.pb.go b/proto/eth/service/key_management.pb.go index b8a305b8eb..0500390780 100755 --- a/proto/eth/service/key_management.pb.go +++ b/proto/eth/service/key_management.pb.go @@ -661,7 +661,7 @@ var file_proto_eth_service_key_management_proto_rawDesc = []byte{ 0x31, 0x2f, 0x6b, 0x65, 0x79, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x73, 0x3a, 0x01, 0x2a, 0x12, 0x95, 0x01, 0x0a, 0x0f, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x73, 0x12, 0x2c, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, - 0x68, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, + 0x68, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, @@ -716,7 +716,7 @@ var file_proto_eth_service_key_management_proto_depIdxs = []int32{ 1, // 4: ethereum.eth.service.DeletedKeystoreStatus.status:type_name -> ethereum.eth.service.DeletedKeystoreStatus.Status 10, // 5: ethereum.eth.service.KeyManagement.ListKeystores:input_type -> google.protobuf.Empty 3, // 6: ethereum.eth.service.KeyManagement.ImportKeystores:input_type -> ethereum.eth.service.ImportKeystoresRequest - 3, // 7: ethereum.eth.service.KeyManagement.DeleteKeystores:input_type -> ethereum.eth.service.ImportKeystoresRequest + 5, // 7: ethereum.eth.service.KeyManagement.DeleteKeystores:input_type -> ethereum.eth.service.DeleteKeystoresRequest 2, // 8: ethereum.eth.service.KeyManagement.ListKeystores:output_type -> ethereum.eth.service.ListKeystoresResponse 4, // 9: ethereum.eth.service.KeyManagement.ImportKeystores:output_type -> 
ethereum.eth.service.ImportKeystoresResponse 6, // 10: ethereum.eth.service.KeyManagement.DeleteKeystores:output_type -> ethereum.eth.service.DeleteKeystoresResponse @@ -865,7 +865,7 @@ const _ = grpc.SupportPackageIsVersion6 type KeyManagementClient interface { ListKeystores(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ListKeystoresResponse, error) ImportKeystores(ctx context.Context, in *ImportKeystoresRequest, opts ...grpc.CallOption) (*ImportKeystoresResponse, error) - DeleteKeystores(ctx context.Context, in *ImportKeystoresRequest, opts ...grpc.CallOption) (*DeleteKeystoresResponse, error) + DeleteKeystores(ctx context.Context, in *DeleteKeystoresRequest, opts ...grpc.CallOption) (*DeleteKeystoresResponse, error) } type keyManagementClient struct { @@ -894,7 +894,7 @@ func (c *keyManagementClient) ImportKeystores(ctx context.Context, in *ImportKey return out, nil } -func (c *keyManagementClient) DeleteKeystores(ctx context.Context, in *ImportKeystoresRequest, opts ...grpc.CallOption) (*DeleteKeystoresResponse, error) { +func (c *keyManagementClient) DeleteKeystores(ctx context.Context, in *DeleteKeystoresRequest, opts ...grpc.CallOption) (*DeleteKeystoresResponse, error) { out := new(DeleteKeystoresResponse) err := c.cc.Invoke(ctx, "/ethereum.eth.service.KeyManagement/DeleteKeystores", in, out, opts...) if err != nil { @@ -907,7 +907,7 @@ func (c *keyManagementClient) DeleteKeystores(ctx context.Context, in *ImportKey type KeyManagementServer interface { ListKeystores(context.Context, *empty.Empty) (*ListKeystoresResponse, error) ImportKeystores(context.Context, *ImportKeystoresRequest) (*ImportKeystoresResponse, error) - DeleteKeystores(context.Context, *ImportKeystoresRequest) (*DeleteKeystoresResponse, error) + DeleteKeystores(context.Context, *DeleteKeystoresRequest) (*DeleteKeystoresResponse, error) } // UnimplementedKeyManagementServer can be embedded to have forward compatible implementations. 
@@ -920,7 +920,7 @@ func (*UnimplementedKeyManagementServer) ListKeystores(context.Context, *empty.E func (*UnimplementedKeyManagementServer) ImportKeystores(context.Context, *ImportKeystoresRequest) (*ImportKeystoresResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method ImportKeystores not implemented") } -func (*UnimplementedKeyManagementServer) DeleteKeystores(context.Context, *ImportKeystoresRequest) (*DeleteKeystoresResponse, error) { +func (*UnimplementedKeyManagementServer) DeleteKeystores(context.Context, *DeleteKeystoresRequest) (*DeleteKeystoresResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method DeleteKeystores not implemented") } @@ -965,7 +965,7 @@ func _KeyManagement_ImportKeystores_Handler(srv interface{}, ctx context.Context } func _KeyManagement_DeleteKeystores_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ImportKeystoresRequest) + in := new(DeleteKeystoresRequest) if err := dec(in); err != nil { return nil, err } @@ -977,7 +977,7 @@ func _KeyManagement_DeleteKeystores_Handler(srv interface{}, ctx context.Context FullMethod: "/ethereum.eth.service.KeyManagement/DeleteKeystores", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(KeyManagementServer).DeleteKeystores(ctx, req.(*ImportKeystoresRequest)) + return srv.(KeyManagementServer).DeleteKeystores(ctx, req.(*DeleteKeystoresRequest)) } return interceptor(ctx, in, info, handler) } diff --git a/proto/eth/service/key_management.pb.gw.go b/proto/eth/service/key_management.pb.gw.go index 3c160b8b0b..8dd77e4da8 100755 --- a/proto/eth/service/key_management.pb.gw.go +++ b/proto/eth/service/key_management.pb.gw.go @@ -94,7 +94,7 @@ var ( ) func request_KeyManagement_DeleteKeystores_0(ctx context.Context, marshaler runtime.Marshaler, client KeyManagementClient, req *http.Request, pathParams map[string]string) 
(proto.Message, runtime.ServerMetadata, error) { - var protoReq ImportKeystoresRequest + var protoReq DeleteKeystoresRequest var metadata runtime.ServerMetadata if err := req.ParseForm(); err != nil { @@ -110,7 +110,7 @@ func request_KeyManagement_DeleteKeystores_0(ctx context.Context, marshaler runt } func local_request_KeyManagement_DeleteKeystores_0(ctx context.Context, marshaler runtime.Marshaler, server KeyManagementServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ImportKeystoresRequest + var protoReq DeleteKeystoresRequest var metadata runtime.ServerMetadata if err := req.ParseForm(); err != nil { diff --git a/proto/eth/service/key_management.proto b/proto/eth/service/key_management.proto index 94da434e70..f5ce412232 100644 --- a/proto/eth/service/key_management.proto +++ b/proto/eth/service/key_management.proto @@ -48,8 +48,8 @@ service KeyManagement { }; } - // ImportKeystores generated by the Eth2.0 deposit CLI tooling. All keystores MUST be encrypted with - // the same password. Users SHOULD send slashing_protection data associated with the imported + // ImportKeystores generated by the Eth2.0 deposit CLI tooling. + // Users SHOULD send slashing_protection data associated with the imported // pubkeys. MUST follow the format defined in EIP-3076: Slashing Protection Interchange Format. 
// // HTTP response status codes: @@ -81,7 +81,7 @@ service KeyManagement { // - 401: Unauthorized // - 403: Forbidden from accessing the resource // - 500: Validator internal error - rpc DeleteKeystores(ImportKeystoresRequest) returns (DeleteKeystoresResponse) { + rpc DeleteKeystores(DeleteKeystoresRequest) returns (DeleteKeystoresResponse) { option (google.api.http) = { delete: "/internal/eth/v1/keystores", body: "*" diff --git a/proto/prysm/v1alpha1/validator-client/web_api.pb.go b/proto/prysm/v1alpha1/validator-client/web_api.pb.go index cad094e77c..8ab548f56a 100755 --- a/proto/prysm/v1alpha1/validator-client/web_api.pb.go +++ b/proto/prysm/v1alpha1/validator-client/web_api.pb.go @@ -1035,7 +1035,7 @@ func (x *HasWalletResponse) GetWalletExists() bool { return false } -type ImportKeystoresRequest struct { +type ImportAccountsRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields @@ -1044,8 +1044,8 @@ type ImportKeystoresRequest struct { KeystoresPassword string `protobuf:"bytes,2,opt,name=keystores_password,json=keystoresPassword,proto3" json:"keystores_password,omitempty"` } -func (x *ImportKeystoresRequest) Reset() { - *x = ImportKeystoresRequest{} +func (x *ImportAccountsRequest) Reset() { + *x = ImportAccountsRequest{} if protoimpl.UnsafeEnabled { mi := &file_proto_prysm_v1alpha1_validator_client_web_api_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1053,13 +1053,13 @@ func (x *ImportKeystoresRequest) Reset() { } } -func (x *ImportKeystoresRequest) String() string { +func (x *ImportAccountsRequest) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ImportKeystoresRequest) ProtoMessage() {} +func (*ImportAccountsRequest) ProtoMessage() {} -func (x *ImportKeystoresRequest) ProtoReflect() protoreflect.Message { +func (x *ImportAccountsRequest) ProtoReflect() protoreflect.Message { mi := 
&file_proto_prysm_v1alpha1_validator_client_web_api_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1071,26 +1071,26 @@ func (x *ImportKeystoresRequest) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ImportKeystoresRequest.ProtoReflect.Descriptor instead. -func (*ImportKeystoresRequest) Descriptor() ([]byte, []int) { +// Deprecated: Use ImportAccountsRequest.ProtoReflect.Descriptor instead. +func (*ImportAccountsRequest) Descriptor() ([]byte, []int) { return file_proto_prysm_v1alpha1_validator_client_web_api_proto_rawDescGZIP(), []int{15} } -func (x *ImportKeystoresRequest) GetKeystoresImported() []string { +func (x *ImportAccountsRequest) GetKeystoresImported() []string { if x != nil { return x.KeystoresImported } return nil } -func (x *ImportKeystoresRequest) GetKeystoresPassword() string { +func (x *ImportAccountsRequest) GetKeystoresPassword() string { if x != nil { return x.KeystoresPassword } return "" } -type ImportKeystoresResponse struct { +type ImportAccountsResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields @@ -1098,8 +1098,8 @@ type ImportKeystoresResponse struct { ImportedPublicKeys [][]byte `protobuf:"bytes,1,rep,name=imported_public_keys,json=importedPublicKeys,proto3" json:"imported_public_keys,omitempty"` } -func (x *ImportKeystoresResponse) Reset() { - *x = ImportKeystoresResponse{} +func (x *ImportAccountsResponse) Reset() { + *x = ImportAccountsResponse{} if protoimpl.UnsafeEnabled { mi := &file_proto_prysm_v1alpha1_validator_client_web_api_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1107,13 +1107,13 @@ func (x *ImportKeystoresResponse) Reset() { } } -func (x *ImportKeystoresResponse) String() string { +func (x *ImportAccountsResponse) String() string { return protoimpl.X.MessageStringOf(x) } -func (*ImportKeystoresResponse) ProtoMessage() {} 
+func (*ImportAccountsResponse) ProtoMessage() {} -func (x *ImportKeystoresResponse) ProtoReflect() protoreflect.Message { +func (x *ImportAccountsResponse) ProtoReflect() protoreflect.Message { mi := &file_proto_prysm_v1alpha1_validator_client_web_api_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) @@ -1125,12 +1125,12 @@ func (x *ImportKeystoresResponse) ProtoReflect() protoreflect.Message { return mi.MessageOf(x) } -// Deprecated: Use ImportKeystoresResponse.ProtoReflect.Descriptor instead. -func (*ImportKeystoresResponse) Descriptor() ([]byte, []int) { +// Deprecated: Use ImportAccountsResponse.ProtoReflect.Descriptor instead. +func (*ImportAccountsResponse) Descriptor() ([]byte, []int) { return file_proto_prysm_v1alpha1_validator_client_web_api_proto_rawDescGZIP(), []int{16} } -func (x *ImportKeystoresResponse) GetImportedPublicKeys() [][]byte { +func (x *ImportAccountsResponse) GetImportedPublicKeys() [][]byte { if x != nil { return x.ImportedPublicKeys } @@ -1867,330 +1867,329 @@ var file_proto_prysm_v1alpha1_validator_client_web_api_proto_rawDesc = []byte{ 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x5f, 0x65, 0x78, 0x69, 0x73, 0x74, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0c, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x45, 0x78, 0x69, 0x73, 0x74, 0x73, - 0x22, 0x76, 0x0a, 0x16, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x74, 0x6f, - 0x72, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x12, 0x6b, 0x65, - 0x79, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x73, 0x5f, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, - 0x18, 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x11, 0x6b, 0x65, 0x79, 0x73, 0x74, 0x6f, 0x72, 0x65, - 0x73, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x12, 0x2d, 0x0a, 0x12, 0x6b, 0x65, 0x79, - 0x73, 0x74, 0x6f, 0x72, 0x65, 0x73, 0x5f, 0x70, 0x61, 0x73, 0x73, 0x77, 
0x6f, 0x72, 0x64, 0x18, - 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x6b, 0x65, 0x79, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x73, - 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, 0x4b, 0x0a, 0x17, 0x49, 0x6d, 0x70, 0x6f, - 0x72, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, - 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, - 0x0c, 0x52, 0x12, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x50, 0x75, 0x62, 0x6c, 0x69, - 0x63, 0x4b, 0x65, 0x79, 0x73, 0x22, 0x2d, 0x0a, 0x15, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, - 0x69, 0x7a, 0x65, 0x41, 0x75, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, - 0x0a, 0x05, 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, - 0x6f, 0x6b, 0x65, 0x6e, 0x22, 0x5b, 0x0a, 0x16, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, - 0x7a, 0x65, 0x41, 0x75, 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x22, - 0x0a, 0x0d, 0x68, 0x61, 0x73, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x75, 0x70, 0x18, - 0x01, 0x20, 0x01, 0x28, 0x08, 0x52, 0x0b, 0x68, 0x61, 0x73, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, - 0x55, 0x70, 0x12, 0x1d, 0x0a, 0x0a, 0x68, 0x61, 0x73, 0x5f, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, - 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x68, 0x61, 0x73, 0x57, 0x61, 0x6c, 0x6c, 0x65, - 0x74, 0x22, 0x9e, 0x02, 0x0a, 0x14, 0x42, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, - 0x75, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x62, 0x65, - 0x61, 0x63, 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, - 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x62, 0x65, 0x61, 0x63, 0x6f, 0x6e, - 0x4e, 0x6f, 0x64, 0x65, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x09, - 0x63, 0x6f, 
0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, - 0x09, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x79, - 0x6e, 0x63, 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x79, 0x6e, - 0x63, 0x69, 0x6e, 0x67, 0x12, 0x21, 0x0a, 0x0c, 0x67, 0x65, 0x6e, 0x65, 0x73, 0x69, 0x73, 0x5f, - 0x74, 0x69, 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x67, 0x65, 0x6e, 0x65, - 0x73, 0x69, 0x73, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x65, 0x70, 0x6f, 0x73, - 0x69, 0x74, 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72, - 0x65, 0x73, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x16, 0x64, 0x65, 0x70, 0x6f, 0x73, - 0x69, 0x74, 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, - 0x73, 0x12, 0x3f, 0x0a, 0x0a, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x18, - 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, - 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x68, - 0x61, 0x69, 0x6e, 0x48, 0x65, 0x61, 0x64, 0x52, 0x09, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x48, 0x65, - 0x61, 0x64, 0x22, 0x37, 0x0a, 0x14, 0x56, 0x6f, 0x6c, 0x75, 0x6e, 0x74, 0x61, 0x72, 0x79, 0x45, - 0x78, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x75, + 0x22, 0x75, 0x0a, 0x15, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x2d, 0x0a, 0x12, 0x6b, 0x65, 0x79, + 0x73, 0x74, 0x6f, 0x72, 0x65, 0x73, 0x5f, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x18, + 0x01, 0x20, 0x03, 0x28, 0x09, 0x52, 0x11, 0x6b, 0x65, 0x79, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x73, + 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x12, 0x2d, 0x0a, 0x12, 0x6b, 0x65, 0x79, 0x73, + 0x74, 0x6f, 0x72, 0x65, 0x73, 0x5f, 0x70, 0x61, 
0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x11, 0x6b, 0x65, 0x79, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x73, 0x50, + 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, 0x4a, 0x0a, 0x16, 0x49, 0x6d, 0x70, 0x6f, 0x72, + 0x74, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x30, 0x0a, 0x14, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x5f, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, - 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x22, 0x38, 0x0a, 0x15, 0x56, - 0x6f, 0x6c, 0x75, 0x6e, 0x74, 0x61, 0x72, 0x79, 0x45, 0x78, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, - 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, 0x69, 0x74, 0x65, 0x64, 0x5f, 0x6b, - 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0a, 0x65, 0x78, 0x69, 0x74, 0x65, - 0x64, 0x4b, 0x65, 0x79, 0x73, 0x22, 0x61, 0x0a, 0x15, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x41, - 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, - 0x0a, 0x0b, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, - 0x03, 0x28, 0x0c, 0x52, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x12, - 0x27, 0x0a, 0x0f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, - 0x72, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, - 0x50, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, 0x33, 0x0a, 0x16, 0x42, 0x61, 0x63, 0x6b, - 0x75, 0x70, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, - 0x73, 0x65, 0x12, 0x19, 0x0a, 0x08, 0x7a, 0x69, 0x70, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x01, - 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x7a, 0x69, 0x70, 0x46, 0x69, 0x6c, 0x65, 0x22, 0x4a, 0x0a, - 0x15, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 
0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x15, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, - 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, - 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x12, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, - 0x73, 0x54, 0x6f, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x22, 0x3b, 0x0a, 0x16, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6b, - 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, - 0x65, 0x64, 0x4b, 0x65, 0x79, 0x73, 0x22, 0x36, 0x0a, 0x20, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, - 0x53, 0x6c, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x69, - 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x69, 0x6c, 0x65, 0x22, 0x5b, - 0x0a, 0x1f, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x6c, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, - 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, - 0x74, 0x12, 0x38, 0x0a, 0x18, 0x73, 0x6c, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x72, - 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x18, 0x01, 0x20, - 0x01, 0x28, 0x09, 0x52, 0x16, 0x73, 0x6c, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, - 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x73, 0x6f, 0x6e, 0x2a, 0x37, 0x0a, 0x0e, 0x4b, - 0x65, 0x79, 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x0b, 0x0a, - 0x07, 0x44, 0x45, 0x52, 0x49, 0x56, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, - 0x50, 0x4f, 0x52, 0x54, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x52, 0x45, 0x4d, 0x4f, - 0x54, 0x45, 0x10, 0x02, 0x32, 
0x9c, 0x06, 0x0a, 0x06, 0x57, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x12, - 0xa1, 0x01, 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x61, 0x6c, 0x6c, 0x65, 0x74, - 0x12, 0x33, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x76, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x2e, 0x76, - 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x34, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, - 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x61, 0x6c, - 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0x82, 0xd3, 0xe4, - 0x93, 0x02, 0x20, 0x22, 0x1b, 0x2f, 0x76, 0x32, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x6f, 0x72, 0x2f, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x2f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, - 0x3a, 0x01, 0x2a, 0x12, 0x74, 0x0a, 0x0c, 0x57, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, - 0x66, 0x69, 0x67, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x2e, 0x2e, 0x65, 0x74, - 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, - 0x2e, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x61, 0x6c, - 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1c, 0x82, 0xd3, 0xe4, - 0x93, 0x02, 0x16, 0x12, 0x14, 0x2f, 0x76, 0x32, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x6f, 0x72, 0x2f, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x12, 0xb4, 0x01, 0x0a, 0x0f, 0x49, 0x6d, - 0x70, 0x6f, 0x72, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x73, 0x12, 0x36, 0x2e, - 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x76, 0x61, 
0x6c, 0x69, 0x64, 0x61, 0x74, - 0x6f, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x49, - 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x73, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x37, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, - 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x6f, 0x75, - 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4b, 0x65, 0x79, - 0x73, 0x74, 0x6f, 0x72, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x30, - 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x22, 0x25, 0x2f, 0x76, 0x32, 0x2f, 0x76, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2f, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x2f, 0x6b, 0x65, 0x79, - 0x73, 0x74, 0x6f, 0x72, 0x65, 0x73, 0x2f, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x3a, 0x01, 0x2a, - 0x12, 0x99, 0x01, 0x0a, 0x11, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, - 0x73, 0x74, 0x6f, 0x72, 0x65, 0x73, 0x12, 0x38, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, - 0x6d, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, - 0x4b, 0x65, 0x79, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, - 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x32, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, - 0x22, 0x27, 0x2f, 0x76, 0x32, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2f, - 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x2f, 0x6b, 0x65, 0x79, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x73, - 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x3a, 0x01, 0x2a, 0x12, 0xa4, 0x01, 0x0a, - 0x0d, 0x52, 0x65, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x57, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x12, 0x34, + 0x12, 
0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x65, 0x64, 0x50, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, + 0x65, 0x79, 0x73, 0x22, 0x2d, 0x0a, 0x15, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, + 0x65, 0x41, 0x75, 0x74, 0x68, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x14, 0x0a, 0x05, + 0x74, 0x6f, 0x6b, 0x65, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x74, 0x6f, 0x6b, + 0x65, 0x6e, 0x22, 0x5b, 0x0a, 0x16, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, + 0x41, 0x75, 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x22, 0x0a, 0x0d, + 0x68, 0x61, 0x73, 0x5f, 0x73, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x5f, 0x75, 0x70, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x08, 0x52, 0x0b, 0x68, 0x61, 0x73, 0x53, 0x69, 0x67, 0x6e, 0x65, 0x64, 0x55, 0x70, + 0x12, 0x1d, 0x0a, 0x0a, 0x68, 0x61, 0x73, 0x5f, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x18, 0x02, + 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x68, 0x61, 0x73, 0x57, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x22, + 0x9e, 0x02, 0x0a, 0x14, 0x42, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x30, 0x0a, 0x14, 0x62, 0x65, 0x61, 0x63, + 0x6f, 0x6e, 0x5f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x12, 0x62, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x4e, 0x6f, + 0x64, 0x65, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x12, 0x1c, 0x0a, 0x09, 0x63, 0x6f, + 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x08, 0x52, 0x09, 0x63, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x65, 0x64, 0x12, 0x18, 0x0a, 0x07, 0x73, 0x79, 0x6e, 0x63, + 0x69, 0x6e, 0x67, 0x18, 0x03, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x73, 0x79, 0x6e, 0x63, 0x69, + 0x6e, 0x67, 0x12, 0x21, 0x0a, 0x0c, 0x67, 0x65, 0x6e, 0x65, 0x73, 0x69, 0x73, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x18, 0x04, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x67, 0x65, 0x6e, 0x65, 0x73, 0x69, + 0x73, 0x54, 0x69, 0x6d, 0x65, 0x12, 0x38, 
0x0a, 0x18, 0x64, 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, + 0x5f, 0x63, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x5f, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, + 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x16, 0x64, 0x65, 0x70, 0x6f, 0x73, 0x69, 0x74, + 0x43, 0x6f, 0x6e, 0x74, 0x72, 0x61, 0x63, 0x74, 0x41, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x12, + 0x3f, 0x0a, 0x0a, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x5f, 0x68, 0x65, 0x61, 0x64, 0x18, 0x06, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, + 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x43, 0x68, 0x61, 0x69, + 0x6e, 0x48, 0x65, 0x61, 0x64, 0x52, 0x09, 0x63, 0x68, 0x61, 0x69, 0x6e, 0x48, 0x65, 0x61, 0x64, + 0x22, 0x37, 0x0a, 0x14, 0x56, 0x6f, 0x6c, 0x75, 0x6e, 0x74, 0x61, 0x72, 0x79, 0x45, 0x78, 0x69, + 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x70, 0x75, 0x62, 0x6c, + 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0a, 0x70, + 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x22, 0x38, 0x0a, 0x15, 0x56, 0x6f, 0x6c, + 0x75, 0x6e, 0x74, 0x61, 0x72, 0x79, 0x45, 0x78, 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, + 0x73, 0x65, 0x12, 0x1f, 0x0a, 0x0b, 0x65, 0x78, 0x69, 0x74, 0x65, 0x64, 0x5f, 0x6b, 0x65, 0x79, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0a, 0x65, 0x78, 0x69, 0x74, 0x65, 0x64, 0x4b, + 0x65, 0x79, 0x73, 0x22, 0x61, 0x0a, 0x15, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x41, 0x63, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x1f, 0x0a, 0x0b, + 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, 0x65, 0x79, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, + 0x0c, 0x52, 0x0a, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x27, 0x0a, + 0x0f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x5f, 0x70, 0x61, 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, + 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0e, 0x62, 0x61, 0x63, 0x6b, 0x75, 
0x70, 0x50, 0x61, + 0x73, 0x73, 0x77, 0x6f, 0x72, 0x64, 0x22, 0x33, 0x0a, 0x16, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, + 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x12, 0x19, 0x0a, 0x08, 0x7a, 0x69, 0x70, 0x5f, 0x66, 0x69, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, + 0x28, 0x0c, 0x52, 0x07, 0x7a, 0x69, 0x70, 0x46, 0x69, 0x6c, 0x65, 0x22, 0x4a, 0x0a, 0x15, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, + 0x75, 0x65, 0x73, 0x74, 0x12, 0x31, 0x0a, 0x15, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x5f, 0x6b, + 0x65, 0x79, 0x73, 0x5f, 0x74, 0x6f, 0x5f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0c, 0x52, 0x12, 0x70, 0x75, 0x62, 0x6c, 0x69, 0x63, 0x4b, 0x65, 0x79, 0x73, 0x54, + 0x6f, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x22, 0x3b, 0x0a, 0x16, 0x44, 0x65, 0x6c, 0x65, 0x74, + 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, + 0x65, 0x12, 0x21, 0x0a, 0x0c, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x6b, 0x65, 0x79, + 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0c, 0x52, 0x0b, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x64, + 0x4b, 0x65, 0x79, 0x73, 0x22, 0x36, 0x0a, 0x20, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x6c, + 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x66, 0x69, 0x6c, 0x65, + 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x66, 0x69, 0x6c, 0x65, 0x22, 0x5b, 0x0a, 0x1f, + 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x6c, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x50, 0x72, + 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, + 0x38, 0x0a, 0x18, 0x73, 0x6c, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x5f, 0x70, 0x72, 0x6f, 0x74, + 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x5f, 0x6a, 0x73, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x16, 
0x73, 0x6c, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x65, + 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x4a, 0x73, 0x6f, 0x6e, 0x2a, 0x37, 0x0a, 0x0e, 0x4b, 0x65, 0x79, + 0x6d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x72, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x0b, 0x0a, 0x07, 0x44, + 0x45, 0x52, 0x49, 0x56, 0x45, 0x44, 0x10, 0x00, 0x12, 0x0c, 0x0a, 0x08, 0x49, 0x4d, 0x50, 0x4f, + 0x52, 0x54, 0x45, 0x44, 0x10, 0x01, 0x12, 0x0a, 0x0a, 0x06, 0x52, 0x45, 0x4d, 0x4f, 0x54, 0x45, + 0x10, 0x02, 0x32, 0x99, 0x06, 0x0a, 0x06, 0x57, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x12, 0xa1, 0x01, + 0x0a, 0x0c, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x12, 0x33, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x32, 0x2e, - 0x52, 0x65, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x57, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, - 0x75, 0x65, 0x73, 0x74, 0x1a, 0x34, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, - 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, - 0x74, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x61, 0x6c, 0x6c, - 0x65, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x27, 0x82, 0xd3, 0xe4, 0x93, - 0x02, 0x21, 0x22, 0x1c, 0x2f, 0x76, 0x32, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, - 0x72, 0x2f, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x2f, 0x72, 0x65, 0x63, 0x6f, 0x76, 0x65, 0x72, - 0x3a, 0x01, 0x2a, 0x32, 0xb6, 0x05, 0x0a, 0x08, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, - 0x12, 0x99, 0x01, 0x0a, 0x0c, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x73, 0x12, 0x33, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x76, 0x61, 0x6c, + 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x34, 0x2e, 0x65, 0x74, 0x68, 0x65, 
0x72, 0x65, 0x75, 0x6d, 0x2e, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x61, 0x6c, 0x6c, 0x65, + 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x20, 0x22, 0x1b, 0x2f, 0x76, 0x32, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, + 0x2f, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x2f, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x3a, 0x01, + 0x2a, 0x12, 0x74, 0x0a, 0x0c, 0x57, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x43, 0x6f, 0x6e, 0x66, 0x69, + 0x67, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x2e, 0x2e, 0x65, 0x74, 0x68, 0x65, + 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x61, + 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x57, 0x61, 0x6c, 0x6c, 0x65, + 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1c, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x16, 0x12, 0x14, 0x2f, 0x76, 0x32, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, + 0x2f, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x12, 0xb1, 0x01, 0x0a, 0x0e, 0x49, 0x6d, 0x70, 0x6f, + 0x72, 0x74, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x12, 0x35, 0x2e, 0x65, 0x74, 0x68, + 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2e, + 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x6d, 0x70, 0x6f, + 0x72, 0x74, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x36, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x2e, + 0x76, 0x32, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 
0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x30, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x2a, 0x22, 0x25, 0x2f, 0x76, 0x32, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, + 0x2f, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x2f, 0x6b, 0x65, 0x79, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x73, 0x2f, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x3a, 0x01, 0x2a, 0x12, 0x99, 0x01, 0x0a, 0x11, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x74, 0x6f, 0x72, 0x65, + 0x73, 0x12, 0x38, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x2e, + 0x76, 0x32, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x74, + 0x6f, 0x72, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x22, 0x32, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x22, 0x27, 0x2f, 0x76, 0x32, + 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2f, 0x77, 0x61, 0x6c, 0x6c, 0x65, + 0x74, 0x2f, 0x6b, 0x65, 0x79, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x73, 0x2f, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x65, 0x3a, 0x01, 0x2a, 0x12, 0xa4, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x63, 0x6f, + 0x76, 0x65, 0x72, 0x57, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x12, 0x34, 0x2e, 0x65, 0x74, 0x68, 0x65, + 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x61, + 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x52, 0x65, 0x63, 0x6f, 0x76, + 0x65, 0x72, 0x57, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, + 0x34, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x32, + 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 
0x65, 0x57, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x27, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x21, 0x22, 0x1c, 0x2f, + 0x76, 0x32, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2f, 0x77, 0x61, 0x6c, + 0x6c, 0x65, 0x74, 0x2f, 0x72, 0x65, 0x63, 0x6f, 0x76, 0x65, 0x72, 0x3a, 0x01, 0x2a, 0x32, 0xb6, + 0x05, 0x0a, 0x08, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x12, 0x99, 0x01, 0x0a, 0x0c, + 0x4c, 0x69, 0x73, 0x74, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x12, 0x33, 0x2e, 0x65, + 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, + 0x72, 0x2e, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, + 0x73, 0x74, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x34, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x34, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1e, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x18, 0x12, + 0x16, 0x2f, 0x76, 0x32, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2f, 0x61, + 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x12, 0xa9, 0x01, 0x0a, 0x0e, 0x42, 0x61, 0x63, 0x6b, + 0x75, 0x70, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x12, 0x35, 0x2e, 0x65, 0x74, 0x68, + 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2e, + 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x61, 0x63, 0x6b, + 0x75, 0x70, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, + 0x74, 0x1a, 0x36, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 
0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x2e, + 0x76, 0x32, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x28, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x22, 0x22, 0x1d, 0x2f, 0x76, 0x32, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, + 0x2f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x2f, 0x62, 0x61, 0x63, 0x6b, 0x75, 0x70, + 0x3a, 0x01, 0x2a, 0x12, 0xb0, 0x01, 0x0a, 0x0e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x63, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x12, 0x35, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x41, 0x63, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x1e, 0x82, 0xd3, - 0xe4, 0x93, 0x02, 0x18, 0x12, 0x16, 0x2f, 0x76, 0x32, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, - 0x74, 0x6f, 0x72, 0x2f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x12, 0xa9, 0x01, 0x0a, - 0x0e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x12, - 0x35, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x32, - 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x52, - 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, - 0x6d, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x61, 0x63, 0x6b, 0x75, 0x70, 0x41, 0x63, - 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x28, - 0x82, 0xd3, 0xe4, 
0x93, 0x02, 0x22, 0x22, 0x1d, 0x2f, 0x76, 0x32, 0x2f, 0x76, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x2f, 0x62, - 0x61, 0x63, 0x6b, 0x75, 0x70, 0x3a, 0x01, 0x2a, 0x12, 0xb0, 0x01, 0x0a, 0x0e, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x12, 0x35, 0x2e, 0x65, 0x74, - 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, - 0x2e, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, - 0x65, 0x74, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, - 0x73, 0x74, 0x1a, 0x36, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x76, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, - 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, - 0x74, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2f, 0x82, 0xd3, 0xe4, 0x93, - 0x02, 0x29, 0x22, 0x24, 0x2f, 0x76, 0x32, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, - 0x72, 0x2f, 0x77, 0x61, 0x6c, 0x6c, 0x65, 0x74, 0x2f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x73, 0x2f, 0x64, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x3a, 0x01, 0x2a, 0x12, 0xae, 0x01, 0x0a, 0x0d, - 0x56, 0x6f, 0x6c, 0x75, 0x6e, 0x74, 0x61, 0x72, 0x79, 0x45, 0x78, 0x69, 0x74, 0x12, 0x34, 0x2e, + 0x75, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x63, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x36, 0x2e, + 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x6f, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x44, + 0x65, 0x6c, 0x65, 0x74, 0x65, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2f, 0x82, 0xd3, 
0xe4, 0x93, 0x02, 0x29, 0x22, 0x24, 0x2f, + 0x76, 0x32, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2f, 0x77, 0x61, 0x6c, + 0x6c, 0x65, 0x74, 0x2f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x2f, 0x64, 0x65, 0x6c, + 0x65, 0x74, 0x65, 0x3a, 0x01, 0x2a, 0x12, 0xae, 0x01, 0x0a, 0x0d, 0x56, 0x6f, 0x6c, 0x75, 0x6e, + 0x74, 0x61, 0x72, 0x79, 0x45, 0x78, 0x69, 0x74, 0x12, 0x34, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, + 0x65, 0x75, 0x6d, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x61, 0x63, + 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6e, 0x74, + 0x61, 0x72, 0x79, 0x45, 0x78, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x35, + 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x6f, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x32, 0x2e, + 0x56, 0x6f, 0x6c, 0x75, 0x6e, 0x74, 0x61, 0x72, 0x79, 0x45, 0x78, 0x69, 0x74, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x30, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x22, 0x25, 0x2f, + 0x76, 0x32, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2f, 0x61, 0x63, 0x63, + 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x2f, 0x76, 0x6f, 0x6c, 0x75, 0x6e, 0x74, 0x61, 0x72, 0x79, 0x2d, + 0x65, 0x78, 0x69, 0x74, 0x3a, 0x01, 0x2a, 0x32, 0xfd, 0x07, 0x0a, 0x06, 0x42, 0x65, 0x61, 0x63, + 0x6f, 0x6e, 0x12, 0x84, 0x01, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x42, 0x65, 0x61, 0x63, 0x6f, 0x6e, + 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x34, + 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, + 0x74, 0x6f, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x32, 0x2e, + 0x42, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x65, 0x73, 
0x70, + 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x23, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1d, 0x12, 0x1b, 0x2f, 0x76, + 0x32, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2f, 0x62, 0x65, 0x61, 0x63, + 0x6f, 0x6e, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0xb7, 0x01, 0x0a, 0x19, 0x47, 0x65, + 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x50, 0x61, 0x72, 0x74, 0x69, 0x63, + 0x69, 0x70, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, + 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, + 0x47, 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x50, 0x61, 0x72, 0x74, + 0x69, 0x63, 0x69, 0x70, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, + 0x1a, 0x35, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, + 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x6f, 0x72, 0x50, 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, + 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2a, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x24, 0x12, + 0x22, 0x2f, 0x76, 0x32, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2f, 0x62, + 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x2f, 0x70, 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, 0x74, + 0x69, 0x6f, 0x6e, 0x12, 0xa8, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x6f, 0x72, 0x50, 0x65, 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x12, + 0x32, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, + 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, + 0x72, 0x50, 0x65, 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, + 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, + 0x74, 0x68, 0x2e, 0x76, 0x31, 
0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x56, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x6f, 0x72, 0x50, 0x65, 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x63, 0x65, + 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x24, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1e, + 0x12, 0x1c, 0x2f, 0x76, 0x32, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2f, + 0x62, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x2f, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x89, + 0x01, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x73, + 0x12, 0x2c, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, + 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x21, + 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, + 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, + 0x73, 0x22, 0x27, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x21, 0x12, 0x1f, 0x2f, 0x76, 0x32, 0x2f, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2f, 0x62, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x2f, + 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x9c, 0x01, 0x0a, 0x14, 0x47, + 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x42, 0x61, 0x6c, 0x61, 0x6e, + 0x63, 0x65, 0x73, 0x12, 0x33, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, + 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4c, 0x69, 0x73, 0x74, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, + 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, + 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, + 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x42, 
0x61, 0x6c, 0x61, 0x6e, 0x63, + 0x65, 0x73, 0x22, 0x25, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1f, 0x12, 0x1d, 0x2f, 0x76, 0x32, 0x2f, + 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2f, 0x62, 0x65, 0x61, 0x63, 0x6f, 0x6e, + 0x2f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x76, 0x0a, 0x11, 0x47, 0x65, 0x74, + 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x51, 0x75, 0x65, 0x75, 0x65, 0x12, 0x16, + 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, + 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x25, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, + 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x51, 0x75, 0x65, 0x75, 0x65, 0x22, 0x22, 0x82, + 0xd3, 0xe4, 0x93, 0x02, 0x1c, 0x12, 0x1a, 0x2f, 0x76, 0x32, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x6f, 0x72, 0x2f, 0x62, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x2f, 0x71, 0x75, 0x65, 0x75, + 0x65, 0x12, 0x64, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x50, 0x65, 0x65, 0x72, 0x73, 0x12, 0x16, 0x2e, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, + 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1c, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, + 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x50, 0x65, + 0x65, 0x72, 0x73, 0x22, 0x22, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1c, 0x12, 0x1a, 0x2f, 0x76, 0x32, + 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2f, 0x62, 0x65, 0x61, 0x63, 0x6f, + 0x6e, 0x2f, 0x70, 0x65, 0x65, 0x72, 0x73, 0x32, 0xe8, 0x02, 0x0a, 0x12, 0x53, 0x6c, 0x61, 0x73, + 0x68, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0xa6, + 0x01, 0x0a, 0x18, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x6c, 0x61, 0x73, 0x68, 0x69, 0x6e, + 0x67, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x1a, 0x40, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x6c, 0x61, 0x73, 0x68, + 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, + 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x30, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2a, 0x12, 0x28, 0x2f, + 0x76, 0x32, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2f, 0x73, 0x6c, 0x61, + 0x73, 0x68, 0x69, 0x6e, 0x67, 0x2d, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, + 0x2f, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x12, 0xa8, 0x01, 0x0a, 0x18, 0x49, 0x6d, 0x70, 0x6f, + 0x72, 0x74, 0x53, 0x6c, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, + 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, + 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x6c, 0x61, 0x73, + 0x68, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, + 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x33, 0x82, + 0xd3, 0xe4, 0x93, 0x02, 0x2d, 0x22, 0x28, 0x2f, 0x76, 0x32, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, + 0x61, 0x74, 0x6f, 0x72, 0x2f, 0x73, 0x6c, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x2d, 0x70, 0x72, + 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x69, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x3a, + 0x01, 0x2a, 0x32, 0xbf, 0x05, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x12, 0x97, 0x01, + 0x0a, 0x17, 0x47, 0x65, 0x74, 0x42, 0x65, 
0x61, 0x63, 0x6f, 0x6e, 0x4e, 0x6f, 0x64, 0x65, 0x43, + 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x1a, 0x36, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x76, 0x61, 0x6c, + 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x2e, + 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, + 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2c, 0x82, 0xd3, 0xe4, 0x93, 0x02, + 0x26, 0x12, 0x24, 0x2f, 0x76, 0x32, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, + 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x6e, 0x6f, 0x64, 0x65, 0x5f, 0x63, 0x6f, 0x6e, + 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x8d, 0x01, 0x0a, 0x10, 0x47, 0x65, 0x74, 0x4c, + 0x6f, 0x67, 0x73, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x16, 0x2e, 0x67, + 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, + 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x34, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, + 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, + 0x74, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x6f, 0x67, 0x73, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, + 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2b, 0x82, 0xd3, 0xe4, 0x93, + 0x02, 0x25, 0x12, 0x23, 0x2f, 0x76, 0x32, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, + 0x72, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x6c, 0x6f, 0x67, 0x73, 0x2f, 0x65, 0x6e, + 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x7b, 0x0a, 0x0a, 0x47, 0x65, 0x74, 0x56, 0x65, + 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 
0x1a, 0x2f, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x56, - 0x6f, 0x6c, 0x75, 0x6e, 0x74, 0x61, 0x72, 0x79, 0x45, 0x78, 0x69, 0x74, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x76, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x6f, 0x6c, 0x75, 0x6e, 0x74, 0x61, 0x72, 0x79, 0x45, 0x78, - 0x69, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x30, 0x82, 0xd3, 0xe4, 0x93, - 0x02, 0x2a, 0x22, 0x25, 0x2f, 0x76, 0x32, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, - 0x72, 0x2f, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x2f, 0x76, 0x6f, 0x6c, 0x75, 0x6e, - 0x74, 0x61, 0x72, 0x79, 0x2d, 0x65, 0x78, 0x69, 0x74, 0x3a, 0x01, 0x2a, 0x32, 0xfd, 0x07, 0x0a, - 0x06, 0x42, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x12, 0x84, 0x01, 0x0a, 0x0f, 0x47, 0x65, 0x74, 0x42, - 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, - 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, - 0x70, 0x74, 0x79, 0x1a, 0x34, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x76, - 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, - 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x42, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x53, 0x74, 0x61, 0x74, 0x75, - 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x23, 0x82, 0xd3, 0xe4, 0x93, 0x02, - 0x1d, 0x12, 0x1b, 0x2f, 0x76, 0x32, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, - 0x2f, 0x62, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x2f, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0xb7, - 0x01, 0x0a, 0x19, 0x47, 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x50, - 0x61, 0x72, 0x74, 0x69, 
0x63, 0x69, 0x70, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x37, 0x2e, 0x65, - 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, - 0x70, 0x68, 0x61, 0x31, 0x2e, 0x47, 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, - 0x72, 0x50, 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, - 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, - 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x56, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x50, 0x61, 0x72, 0x74, 0x69, 0x63, 0x69, 0x70, 0x61, - 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2a, 0x82, 0xd3, - 0xe4, 0x93, 0x02, 0x24, 0x12, 0x22, 0x2f, 0x76, 0x32, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, - 0x74, 0x6f, 0x72, 0x2f, 0x62, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x2f, 0x70, 0x61, 0x72, 0x74, 0x69, - 0x63, 0x69, 0x70, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0xa8, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, - 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x50, 0x65, 0x72, 0x66, 0x6f, 0x72, 0x6d, - 0x61, 0x6e, 0x63, 0x65, 0x12, 0x32, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, - 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x56, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x50, 0x65, 0x72, 0x66, 0x6f, 0x72, 0x6d, 0x61, 0x6e, 0x63, - 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, - 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, - 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x50, 0x65, 0x72, 0x66, 0x6f, 0x72, - 0x6d, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x24, 0x82, - 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x12, 0x1c, 0x2f, 0x76, 0x32, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x6f, 0x72, 0x2f, 0x62, 0x65, 0x61, 0x63, 0x6f, 
0x6e, 0x2f, 0x73, 0x75, 0x6d, 0x6d, - 0x61, 0x72, 0x79, 0x12, 0x89, 0x01, 0x0a, 0x0d, 0x47, 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x6f, 0x72, 0x73, 0x12, 0x2c, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, - 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4c, 0x69, - 0x73, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x52, 0x65, 0x71, 0x75, - 0x65, 0x73, 0x74, 0x1a, 0x21, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, - 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x56, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x22, 0x27, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x21, 0x12, 0x1f, - 0x2f, 0x76, 0x32, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2f, 0x62, 0x65, - 0x61, 0x63, 0x6f, 0x6e, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x73, 0x12, - 0x9c, 0x01, 0x0a, 0x14, 0x47, 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, - 0x42, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x33, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, - 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, - 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x42, 0x61, - 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x28, 0x2e, - 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, - 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x42, - 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x22, 0x25, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1f, 0x12, - 0x1d, 0x2f, 0x76, 0x32, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2f, 0x62, - 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x2f, 0x62, 0x61, 0x6c, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x76, - 0x0a, 0x11, 0x47, 0x65, 0x74, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x51, 0x75, - 
0x65, 0x75, 0x65, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, - 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x25, 0x2e, 0x65, 0x74, - 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, - 0x68, 0x61, 0x31, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x51, 0x75, 0x65, - 0x75, 0x65, 0x22, 0x22, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1c, 0x12, 0x1a, 0x2f, 0x76, 0x32, 0x2f, - 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2f, 0x62, 0x65, 0x61, 0x63, 0x6f, 0x6e, - 0x2f, 0x71, 0x75, 0x65, 0x75, 0x65, 0x12, 0x64, 0x0a, 0x08, 0x47, 0x65, 0x74, 0x50, 0x65, 0x65, - 0x72, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, - 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x1c, 0x2e, 0x65, 0x74, 0x68, - 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, - 0x61, 0x31, 0x2e, 0x50, 0x65, 0x65, 0x72, 0x73, 0x22, 0x22, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1c, - 0x12, 0x1a, 0x2f, 0x76, 0x32, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2f, - 0x62, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x2f, 0x70, 0x65, 0x65, 0x72, 0x73, 0x32, 0xe8, 0x02, 0x0a, - 0x12, 0x53, 0x6c, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x12, 0xa6, 0x01, 0x0a, 0x18, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x6c, - 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, - 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x40, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, - 0x65, 0x75, 0x6d, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x61, 0x63, - 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, - 0x53, 0x6c, 0x61, 0x73, 0x68, 0x69, 
0x6e, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, - 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x30, 0x82, 0xd3, 0xe4, 0x93, - 0x02, 0x2a, 0x12, 0x28, 0x2f, 0x76, 0x32, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, - 0x72, 0x2f, 0x73, 0x6c, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x2d, 0x70, 0x72, 0x6f, 0x74, 0x65, - 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x65, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x12, 0xa8, 0x01, 0x0a, - 0x18, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x53, 0x6c, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x50, - 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x3f, 0x2e, 0x65, 0x74, 0x68, 0x65, - 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x61, - 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, - 0x74, 0x53, 0x6c, 0x61, 0x73, 0x68, 0x69, 0x6e, 0x67, 0x50, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, - 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x22, 0x33, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2d, 0x22, 0x28, 0x2f, 0x76, 0x32, 0x2f, - 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2f, 0x73, 0x6c, 0x61, 0x73, 0x68, 0x69, - 0x6e, 0x67, 0x2d, 0x70, 0x72, 0x6f, 0x74, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x2f, 0x69, 0x6d, - 0x70, 0x6f, 0x72, 0x74, 0x3a, 0x01, 0x2a, 0x32, 0xbf, 0x05, 0x0a, 0x06, 0x48, 0x65, 0x61, 0x6c, - 0x74, 0x68, 0x12, 0x97, 0x01, 0x0a, 0x17, 0x47, 0x65, 0x74, 0x42, 0x65, 0x61, 0x63, 0x6f, 0x6e, - 0x4e, 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x16, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x36, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, - 0x6d, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2e, 
0x61, 0x63, 0x63, 0x6f, - 0x75, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x4e, 0x6f, 0x64, 0x65, 0x43, 0x6f, 0x6e, 0x6e, - 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2c, - 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x26, 0x12, 0x24, 0x2f, 0x76, 0x32, 0x2f, 0x76, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x6e, 0x6f, 0x64, - 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x6e, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, 0x12, 0x8d, 0x01, 0x0a, - 0x10, 0x47, 0x65, 0x74, 0x4c, 0x6f, 0x67, 0x73, 0x45, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, + 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x24, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x12, 0x1c, 0x2f, 0x76, 0x32, 0x2f, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x76, 0x65, 0x72, + 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x82, 0x01, 0x0a, 0x10, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x42, + 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x4c, 0x6f, 0x67, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, + 0x79, 0x1a, 0x23, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, + 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4c, 0x6f, 0x67, 0x73, 0x52, 0x65, + 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2f, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x29, 0x12, 0x27, + 0x2f, 0x76, 0x32, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2f, 0x68, 0x65, + 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x6c, 0x6f, 0x67, 0x73, 0x2f, 0x62, 0x65, 0x61, 0x63, 0x6f, 0x6e, + 0x2f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x30, 0x01, 0x12, 0x88, 0x01, 0x0a, 0x13, 0x53, 0x74, + 0x72, 0x65, 0x61, 0x6d, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x4c, 0x6f, 0x67, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x62, 0x75, 
0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x34, 0x2e, 0x65, 0x74, 0x68, 0x65, - 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x61, - 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x4c, 0x6f, 0x67, 0x73, 0x45, - 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, - 0x2b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x25, 0x12, 0x23, 0x2f, 0x76, 0x32, 0x2f, 0x76, 0x61, 0x6c, - 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x6c, 0x6f, - 0x67, 0x73, 0x2f, 0x65, 0x6e, 0x64, 0x70, 0x6f, 0x69, 0x6e, 0x74, 0x73, 0x12, 0x7b, 0x0a, 0x0a, - 0x47, 0x65, 0x74, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, - 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, - 0x74, 0x79, 0x1a, 0x2f, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x76, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, - 0x2e, 0x76, 0x32, 0x2e, 0x56, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x24, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1e, 0x12, 0x1c, 0x2f, 0x76, 0x32, - 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x2f, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x82, 0x01, 0x0a, 0x10, 0x53, 0x74, - 0x72, 0x65, 0x61, 0x6d, 0x42, 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x4c, 0x6f, 0x67, 0x73, 0x12, 0x16, - 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, - 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x23, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, - 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4c, - 0x6f, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x2f, 0x82, 0xd3, 0xe4, - 0x93, 0x02, 0x29, 0x12, 0x27, 0x2f, 0x76, 0x32, 
0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x6f, 0x72, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x6c, 0x6f, 0x67, 0x73, 0x2f, 0x62, - 0x65, 0x61, 0x63, 0x6f, 0x6e, 0x2f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x30, 0x01, 0x12, 0x88, - 0x01, 0x0a, 0x13, 0x53, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, - 0x6f, 0x72, 0x4c, 0x6f, 0x67, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, - 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x23, - 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, - 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2e, 0x4c, 0x6f, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, - 0x6e, 0x73, 0x65, 0x22, 0x32, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x12, 0x2a, 0x2f, 0x76, 0x32, - 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, - 0x68, 0x2f, 0x6c, 0x6f, 0x67, 0x73, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, - 0x2f, 0x73, 0x74, 0x72, 0x65, 0x61, 0x6d, 0x30, 0x01, 0x32, 0x86, 0x01, 0x0a, 0x04, 0x41, 0x75, - 0x74, 0x68, 0x12, 0x7e, 0x0a, 0x0a, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, - 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, - 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x36, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, - 0x65, 0x75, 0x6d, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x61, 0x63, - 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, - 0x6c, 0x69, 0x7a, 0x65, 0x41, 0x75, 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, - 0x22, 0x20, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1a, 0x12, 0x18, 0x2f, 0x76, 0x32, 0x2f, 0x76, 0x61, - 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, - 0x7a, 0x65, 0x42, 0xc4, 0x01, 0x0a, 0x22, 0x6f, 0x72, 0x67, 0x2e, 0x65, 0x74, 0x68, 
0x65, 0x72, - 0x65, 0x75, 0x6d, 0x2e, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x61, 0x63, - 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x2e, 0x76, 0x32, 0x42, 0x08, 0x57, 0x65, 0x62, 0x50, 0x72, - 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x50, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, - 0x6d, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f, - 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x79, 0x73, - 0x6d, 0x2f, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, - 0x61, 0x74, 0x6f, 0x72, 0x2d, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x3b, 0x76, 0x61, 0x6c, 0x69, - 0x64, 0x61, 0x74, 0x6f, 0x72, 0x70, 0x62, 0xaa, 0x02, 0x1e, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, - 0x75, 0x6d, 0x2e, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x41, 0x63, 0x63, - 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x2e, 0x56, 0x32, 0xca, 0x02, 0x1e, 0x45, 0x74, 0x68, 0x65, 0x72, - 0x65, 0x75, 0x6d, 0x5c, 0x56, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x5c, 0x41, 0x63, - 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, 0x5c, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, - 0x33, + 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x23, 0x2e, 0x65, 0x74, 0x68, 0x65, + 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x76, 0x31, 0x61, 0x6c, 0x70, 0x68, 0x61, + 0x31, 0x2e, 0x4c, 0x6f, 0x67, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x32, + 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x2c, 0x12, 0x2a, 0x2f, 0x76, 0x32, 0x2f, 0x76, 0x61, 0x6c, 0x69, + 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2f, 0x68, 0x65, 0x61, 0x6c, 0x74, 0x68, 0x2f, 0x6c, 0x6f, 0x67, + 0x73, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2f, 0x73, 0x74, 0x72, 0x65, + 0x61, 0x6d, 0x30, 0x01, 0x32, 0x86, 0x01, 0x0a, 0x04, 0x41, 0x75, 0x74, 0x68, 0x12, 0x7e, 0x0a, + 0x0a, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x12, 0x16, 0x2e, 0x67, 0x6f, + 0x6f, 0x67, 0x6c, 
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, + 0x70, 0x74, 0x79, 0x1a, 0x36, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x73, 0x2e, 0x76, 0x32, 0x2e, 0x49, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x41, + 0x75, 0x74, 0x68, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x20, 0x82, 0xd3, 0xe4, + 0x93, 0x02, 0x1a, 0x12, 0x18, 0x2f, 0x76, 0x32, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, + 0x6f, 0x72, 0x2f, 0x69, 0x6e, 0x69, 0x74, 0x69, 0x61, 0x6c, 0x69, 0x7a, 0x65, 0x42, 0xc4, 0x01, + 0x0a, 0x22, 0x6f, 0x72, 0x67, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x76, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x61, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x73, 0x2e, 0x76, 0x32, 0x42, 0x08, 0x57, 0x65, 0x62, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, + 0x5a, 0x50, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x79, + 0x73, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x76, 0x31, 0x61, + 0x6c, 0x70, 0x68, 0x61, 0x31, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2d, + 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x3b, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, + 0x70, 0x62, 0xaa, 0x02, 0x1e, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x56, 0x61, + 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2e, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x73, + 0x2e, 0x56, 0x32, 0xca, 0x02, 0x1e, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x5c, 0x56, + 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x5c, 0x41, 0x63, 0x63, 0x6f, 0x75, 0x6e, 0x74, + 0x73, 0x5c, 0x56, 0x32, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( @@ -2224,8 +2223,8 @@ var 
file_proto_prysm_v1alpha1_validator_client_web_api_proto_goTypes = []interfa (*LogsEndpointResponse)(nil), // 13: ethereum.validator.accounts.v2.LogsEndpointResponse (*VersionResponse)(nil), // 14: ethereum.validator.accounts.v2.VersionResponse (*HasWalletResponse)(nil), // 15: ethereum.validator.accounts.v2.HasWalletResponse - (*ImportKeystoresRequest)(nil), // 16: ethereum.validator.accounts.v2.ImportKeystoresRequest - (*ImportKeystoresResponse)(nil), // 17: ethereum.validator.accounts.v2.ImportKeystoresResponse + (*ImportAccountsRequest)(nil), // 16: ethereum.validator.accounts.v2.ImportAccountsRequest + (*ImportAccountsResponse)(nil), // 17: ethereum.validator.accounts.v2.ImportAccountsResponse (*InitializeAuthRequest)(nil), // 18: ethereum.validator.accounts.v2.InitializeAuthRequest (*InitializeAuthResponse)(nil), // 19: ethereum.validator.accounts.v2.InitializeAuthResponse (*BeaconStatusResponse)(nil), // 20: ethereum.validator.accounts.v2.BeaconStatusResponse @@ -2259,7 +2258,7 @@ var file_proto_prysm_v1alpha1_validator_client_web_api_proto_depIdxs = []int32{ 29, // 4: ethereum.validator.accounts.v2.BeaconStatusResponse.chain_head:type_name -> ethereum.eth.v1alpha1.ChainHead 1, // 5: ethereum.validator.accounts.v2.Wallet.CreateWallet:input_type -> ethereum.validator.accounts.v2.CreateWalletRequest 30, // 6: ethereum.validator.accounts.v2.Wallet.WalletConfig:input_type -> google.protobuf.Empty - 16, // 7: ethereum.validator.accounts.v2.Wallet.ImportKeystores:input_type -> ethereum.validator.accounts.v2.ImportKeystoresRequest + 16, // 7: ethereum.validator.accounts.v2.Wallet.ImportAccounts:input_type -> ethereum.validator.accounts.v2.ImportAccountsRequest 7, // 8: ethereum.validator.accounts.v2.Wallet.ValidateKeystores:input_type -> ethereum.validator.accounts.v2.ValidateKeystoresRequest 6, // 9: ethereum.validator.accounts.v2.Wallet.RecoverWallet:input_type -> ethereum.validator.accounts.v2.RecoverWalletRequest 8, // 10: 
ethereum.validator.accounts.v2.Accounts.ListAccounts:input_type -> ethereum.validator.accounts.v2.ListAccountsRequest @@ -2283,7 +2282,7 @@ var file_proto_prysm_v1alpha1_validator_client_web_api_proto_depIdxs = []int32{ 30, // 28: ethereum.validator.accounts.v2.Auth.Initialize:input_type -> google.protobuf.Empty 2, // 29: ethereum.validator.accounts.v2.Wallet.CreateWallet:output_type -> ethereum.validator.accounts.v2.CreateWalletResponse 5, // 30: ethereum.validator.accounts.v2.Wallet.WalletConfig:output_type -> ethereum.validator.accounts.v2.WalletResponse - 17, // 31: ethereum.validator.accounts.v2.Wallet.ImportKeystores:output_type -> ethereum.validator.accounts.v2.ImportKeystoresResponse + 17, // 31: ethereum.validator.accounts.v2.Wallet.ImportAccounts:output_type -> ethereum.validator.accounts.v2.ImportAccountsResponse 30, // 32: ethereum.validator.accounts.v2.Wallet.ValidateKeystores:output_type -> google.protobuf.Empty 2, // 33: ethereum.validator.accounts.v2.Wallet.RecoverWallet:output_type -> ethereum.validator.accounts.v2.CreateWalletResponse 9, // 34: ethereum.validator.accounts.v2.Accounts.ListAccounts:output_type -> ethereum.validator.accounts.v2.ListAccountsResponse @@ -2499,7 +2498,7 @@ func file_proto_prysm_v1alpha1_validator_client_web_api_proto_init() { } } file_proto_prysm_v1alpha1_validator_client_web_api_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ImportKeystoresRequest); i { + switch v := v.(*ImportAccountsRequest); i { case 0: return &v.state case 1: @@ -2511,7 +2510,7 @@ func file_proto_prysm_v1alpha1_validator_client_web_api_proto_init() { } } file_proto_prysm_v1alpha1_validator_client_web_api_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { - switch v := v.(*ImportKeystoresResponse); i { + switch v := v.(*ImportAccountsResponse); i { case 0: return &v.state case 1: @@ -2690,7 +2689,7 @@ const _ = grpc.SupportPackageIsVersion6 type WalletClient interface { CreateWallet(ctx 
context.Context, in *CreateWalletRequest, opts ...grpc.CallOption) (*CreateWalletResponse, error) WalletConfig(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*WalletResponse, error) - ImportKeystores(ctx context.Context, in *ImportKeystoresRequest, opts ...grpc.CallOption) (*ImportKeystoresResponse, error) + ImportAccounts(ctx context.Context, in *ImportAccountsRequest, opts ...grpc.CallOption) (*ImportAccountsResponse, error) ValidateKeystores(ctx context.Context, in *ValidateKeystoresRequest, opts ...grpc.CallOption) (*empty.Empty, error) RecoverWallet(ctx context.Context, in *RecoverWalletRequest, opts ...grpc.CallOption) (*CreateWalletResponse, error) } @@ -2721,9 +2720,9 @@ func (c *walletClient) WalletConfig(ctx context.Context, in *empty.Empty, opts . return out, nil } -func (c *walletClient) ImportKeystores(ctx context.Context, in *ImportKeystoresRequest, opts ...grpc.CallOption) (*ImportKeystoresResponse, error) { - out := new(ImportKeystoresResponse) - err := c.cc.Invoke(ctx, "/ethereum.validator.accounts.v2.Wallet/ImportKeystores", in, out, opts...) +func (c *walletClient) ImportAccounts(ctx context.Context, in *ImportAccountsRequest, opts ...grpc.CallOption) (*ImportAccountsResponse, error) { + out := new(ImportAccountsResponse) + err := c.cc.Invoke(ctx, "/ethereum.validator.accounts.v2.Wallet/ImportAccounts", in, out, opts...) 
if err != nil { return nil, err } @@ -2752,7 +2751,7 @@ func (c *walletClient) RecoverWallet(ctx context.Context, in *RecoverWalletReque type WalletServer interface { CreateWallet(context.Context, *CreateWalletRequest) (*CreateWalletResponse, error) WalletConfig(context.Context, *empty.Empty) (*WalletResponse, error) - ImportKeystores(context.Context, *ImportKeystoresRequest) (*ImportKeystoresResponse, error) + ImportAccounts(context.Context, *ImportAccountsRequest) (*ImportAccountsResponse, error) ValidateKeystores(context.Context, *ValidateKeystoresRequest) (*empty.Empty, error) RecoverWallet(context.Context, *RecoverWalletRequest) (*CreateWalletResponse, error) } @@ -2767,8 +2766,8 @@ func (*UnimplementedWalletServer) CreateWallet(context.Context, *CreateWalletReq func (*UnimplementedWalletServer) WalletConfig(context.Context, *empty.Empty) (*WalletResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method WalletConfig not implemented") } -func (*UnimplementedWalletServer) ImportKeystores(context.Context, *ImportKeystoresRequest) (*ImportKeystoresResponse, error) { - return nil, status.Errorf(codes.Unimplemented, "method ImportKeystores not implemented") +func (*UnimplementedWalletServer) ImportAccounts(context.Context, *ImportAccountsRequest) (*ImportAccountsResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method ImportAccounts not implemented") } func (*UnimplementedWalletServer) ValidateKeystores(context.Context, *ValidateKeystoresRequest) (*empty.Empty, error) { return nil, status.Errorf(codes.Unimplemented, "method ValidateKeystores not implemented") @@ -2817,20 +2816,20 @@ func _Wallet_WalletConfig_Handler(srv interface{}, ctx context.Context, dec func return interceptor(ctx, in, info, handler) } -func _Wallet_ImportKeystores_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { - in := new(ImportKeystoresRequest) +func 
_Wallet_ImportAccounts_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ImportAccountsRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { - return srv.(WalletServer).ImportKeystores(ctx, in) + return srv.(WalletServer).ImportAccounts(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, - FullMethod: "/ethereum.validator.accounts.v2.Wallet/ImportKeystores", + FullMethod: "/ethereum.validator.accounts.v2.Wallet/ImportAccounts", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { - return srv.(WalletServer).ImportKeystores(ctx, req.(*ImportKeystoresRequest)) + return srv.(WalletServer).ImportAccounts(ctx, req.(*ImportAccountsRequest)) } return interceptor(ctx, in, info, handler) } @@ -2884,8 +2883,8 @@ var _Wallet_serviceDesc = grpc.ServiceDesc{ Handler: _Wallet_WalletConfig_Handler, }, { - MethodName: "ImportKeystores", - Handler: _Wallet_ImportKeystores_Handler, + MethodName: "ImportAccounts", + Handler: _Wallet_ImportAccounts_Handler, }, { MethodName: "ValidateKeystores", diff --git a/proto/prysm/v1alpha1/validator-client/web_api.pb.gw.go b/proto/prysm/v1alpha1/validator-client/web_api.pb.gw.go index 5dc8e95165..de21c313aa 100755 --- a/proto/prysm/v1alpha1/validator-client/web_api.pb.gw.go +++ b/proto/prysm/v1alpha1/validator-client/web_api.pb.gw.go @@ -90,8 +90,8 @@ func local_request_Wallet_WalletConfig_0(ctx context.Context, marshaler runtime. 
} -func request_Wallet_ImportKeystores_0(ctx context.Context, marshaler runtime.Marshaler, client WalletClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ImportKeystoresRequest +func request_Wallet_ImportAccounts_0(ctx context.Context, marshaler runtime.Marshaler, client WalletClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ImportAccountsRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) @@ -102,13 +102,13 @@ func request_Wallet_ImportKeystores_0(ctx context.Context, marshaler runtime.Mar return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := client.ImportKeystores(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + msg, err := client.ImportAccounts(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) return msg, metadata, err } -func local_request_Wallet_ImportKeystores_0(ctx context.Context, marshaler runtime.Marshaler, server WalletServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { - var protoReq ImportKeystoresRequest +func local_request_Wallet_ImportAccounts_0(ctx context.Context, marshaler runtime.Marshaler, server WalletServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ImportAccountsRequest var metadata runtime.ServerMetadata newReader, berr := utilities.IOReaderFactory(req.Body) @@ -119,7 +119,7 @@ func local_request_Wallet_ImportKeystores_0(ctx context.Context, marshaler runti return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) } - msg, err := server.ImportKeystores(ctx, &protoReq) + msg, err := server.ImportAccounts(ctx, &protoReq) return msg, metadata, err } @@ -738,18 +738,18 @@ func RegisterWalletHandlerServer(ctx 
context.Context, mux *runtime.ServeMux, ser }) - mux.Handle("POST", pattern_Wallet_ImportKeystores_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle("POST", pattern_Wallet_ImportAccounts_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() var stream runtime.ServerTransportStream ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/ethereum.validator.accounts.v2.Wallet/ImportKeystores") + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/ethereum.validator.accounts.v2.Wallet/ImportAccounts") if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := local_request_Wallet_ImportKeystores_0(rctx, inboundMarshaler, server, req, pathParams) + resp, md, err := local_request_Wallet_ImportAccounts_0(rctx, inboundMarshaler, server, req, pathParams) md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { @@ -757,7 +757,7 @@ func RegisterWalletHandlerServer(ctx context.Context, mux *runtime.ServeMux, ser return } - forward_Wallet_ImportKeystores_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Wallet_ImportAccounts_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -1338,23 +1338,23 @@ func RegisterWalletHandlerClient(ctx context.Context, mux *runtime.ServeMux, cli }) - mux.Handle("POST", pattern_Wallet_ImportKeystores_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + mux.Handle("POST", pattern_Wallet_ImportAccounts_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { ctx, cancel := context.WithCancel(req.Context()) defer cancel() inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) - rctx, err := runtime.AnnotateContext(ctx, mux, req, "/ethereum.validator.accounts.v2.Wallet/ImportKeystores") + rctx, err := runtime.AnnotateContext(ctx, mux, req, "/ethereum.validator.accounts.v2.Wallet/ImportAccounts") if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - resp, md, err := request_Wallet_ImportKeystores_0(rctx, inboundMarshaler, client, req, pathParams) + resp, md, err := request_Wallet_ImportAccounts_0(rctx, inboundMarshaler, client, req, pathParams) ctx = runtime.NewServerMetadataContext(ctx, md) if err != nil { runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) return } - forward_Wallet_ImportKeystores_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + forward_Wallet_ImportAccounts_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) 
}) @@ -1406,7 +1406,7 @@ var ( pattern_Wallet_WalletConfig_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2}, []string{"v2", "validator", "wallet"}, "")) - pattern_Wallet_ImportKeystores_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"v2", "validator", "wallet", "keystores", "import"}, "")) + pattern_Wallet_ImportAccounts_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"v2", "validator", "wallet", "keystores", "import"}, "")) pattern_Wallet_ValidateKeystores_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 2, 4}, []string{"v2", "validator", "wallet", "keystores", "validate"}, "")) @@ -1418,7 +1418,7 @@ var ( forward_Wallet_WalletConfig_0 = runtime.ForwardResponseMessage - forward_Wallet_ImportKeystores_0 = runtime.ForwardResponseMessage + forward_Wallet_ImportAccounts_0 = runtime.ForwardResponseMessage forward_Wallet_ValidateKeystores_0 = runtime.ForwardResponseMessage diff --git a/proto/prysm/v1alpha1/validator-client/web_api.proto b/proto/prysm/v1alpha1/validator-client/web_api.proto index ceb873d92b..dbebd117d5 100644 --- a/proto/prysm/v1alpha1/validator-client/web_api.proto +++ b/proto/prysm/v1alpha1/validator-client/web_api.proto @@ -26,7 +26,7 @@ service Wallet { get: "/v2/validator/wallet" }; } - rpc ImportKeystores(ImportKeystoresRequest) returns (ImportKeystoresResponse) { + rpc ImportAccounts(ImportAccountsRequest) returns (ImportAccountsResponse) { option (google.api.http) = { post: "/v2/validator/wallet/keystores/import", body: "*" @@ -308,7 +308,7 @@ message HasWalletResponse { bool wallet_exists = 1; } -message ImportKeystoresRequest { +message ImportAccountsRequest { // JSON-encoded keystore files to import during wallet creation. 
repeated string keystores_imported = 1; @@ -316,7 +316,7 @@ message ImportKeystoresRequest { string keystores_password = 2; } -message ImportKeystoresResponse { +message ImportAccountsResponse { repeated bytes imported_public_keys = 1; } diff --git a/validator/accounts/accounts_import.go b/validator/accounts/accounts_import.go index 52f3f1680d..ad98b8defe 100644 --- a/validator/accounts/accounts_import.go +++ b/validator/accounts/accounts_import.go @@ -19,11 +19,11 @@ import ( "github.com/prysmaticlabs/prysm/encoding/bytesutil" "github.com/prysmaticlabs/prysm/io/file" "github.com/prysmaticlabs/prysm/io/prompt" + ethpbservice "github.com/prysmaticlabs/prysm/proto/eth/service" "github.com/prysmaticlabs/prysm/validator/accounts/iface" "github.com/prysmaticlabs/prysm/validator/accounts/userprompt" "github.com/prysmaticlabs/prysm/validator/accounts/wallet" "github.com/prysmaticlabs/prysm/validator/keymanager" - "github.com/prysmaticlabs/prysm/validator/keymanager/imported" "github.com/urfave/cli/v2" keystorev4 "github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4" ) @@ -74,7 +74,7 @@ func (fileNames byDerivationPath) Swap(i, j int) { // ImportAccountsConfig defines values to run the import accounts function. 
type ImportAccountsConfig struct { Keystores []*keymanager.Keystore - Keymanager *imported.Keymanager + Importer keymanager.Importer AccountPassword string } @@ -140,9 +140,9 @@ func ImportAccountsCli(cliCtx *cli.Context) error { if err != nil { return err } - k, ok := km.(*imported.Keymanager) + k, ok := km.(keymanager.Importer) if !ok { - return errors.New("only imported wallets can import more keystores") + return errors.New("keymanager cannot import keystores") } // Check if the user wishes to import a one-off, private key directly @@ -213,13 +213,22 @@ func ImportAccountsCli(cliCtx *cli.Context) error { } } fmt.Println("Importing accounts, this may take a while...") - if err := ImportAccounts(cliCtx.Context, &ImportAccountsConfig{ - Keymanager: k, + statuses, err := ImportAccounts(cliCtx.Context, &ImportAccountsConfig{ + Importer: k, Keystores: keystoresImported, AccountPassword: accountsPassword, - }); err != nil { + }) + if err != nil { return err } + for i, status := range statuses { + switch status.Status { + case ethpbservice.ImportedKeystoreStatus_DUPLICATE: + log.Warnf("Duplicate key %s found in import request, skipped", keystoresImported[i].Pubkey) + case ethpbservice.ImportedKeystoreStatus_ERROR: + log.Warnf("Could not import keystore for %s: %s", keystoresImported[i].Pubkey, status.Message) + } + } fmt.Printf( "Successfully imported %s accounts, view all of them by running `accounts list`\n", au.BrightMagenta(strconv.Itoa(len(keystoresImported))), @@ -229,17 +238,17 @@ func ImportAccountsCli(cliCtx *cli.Context) error { // ImportAccounts can import external, EIP-2335 compliant keystore.json files as // new accounts into the Prysm validator wallet. 
-func ImportAccounts(ctx context.Context, cfg *ImportAccountsConfig) error { - return cfg.Keymanager.ImportKeystores( +func ImportAccounts(ctx context.Context, cfg *ImportAccountsConfig) ([]*ethpbservice.ImportedKeystoreStatus, error) { + return cfg.Importer.ImportKeystores( ctx, cfg.Keystores, - cfg.AccountPassword, + []string{cfg.AccountPassword}, ) } // Imports a one-off file containing a private key as a hex string into // the Prysm validator's accounts. -func importPrivateKeyAsAccount(cliCtx *cli.Context, wallet *wallet.Wallet, km *imported.Keymanager) error { +func importPrivateKeyAsAccount(cliCtx *cli.Context, wallet *wallet.Wallet, importer keymanager.Importer) error { privKeyFile := cliCtx.String(flags.ImportPrivateKeyFileFlag.Name) fullPath, err := file.ExpandPath(privKeyFile) if err != nil { @@ -270,16 +279,24 @@ func importPrivateKeyAsAccount(cliCtx *cli.Context, wallet *wallet.Wallet, km *i if err != nil { return errors.Wrap(err, "could not encrypt private key into a keystore file") } - if err := ImportAccounts( + statuses, err := ImportAccounts( cliCtx.Context, &ImportAccountsConfig{ - Keymanager: km, + Importer: importer, AccountPassword: wallet.Password(), Keystores: []*keymanager.Keystore{keystore}, }, - ); err != nil { + ) + if err != nil { return errors.Wrap(err, "could not import keystore into wallet") } + for _, status := range statuses { + if status.Status == ethpbservice.ImportedKeystoreStatus_ERROR { + log.Warnf("Could not import keystore for %s: %s", keystore.Pubkey, status.Message) + } else if status.Status == ethpbservice.ImportedKeystoreStatus_DUPLICATE { + log.Warnf("Duplicate key %s skipped", keystore.Pubkey) + } + } fmt.Printf( "Imported account with public key %#x, view all accounts by running `accounts list`\n", au.BrightMagenta(bytesutil.Trunc(privKey.PublicKey().Marshal())), diff --git a/validator/accounts/accounts_import_test.go b/validator/accounts/accounts_import_test.go index fc38b8992a..5027697897 100644 --- 
a/validator/accounts/accounts_import_test.go +++ b/validator/accounts/accounts_import_test.go @@ -131,33 +131,6 @@ func TestImport_DuplicateKeys(t *testing.T) { assert.Equal(t, 1, len(keys)) } -// TestImport_NonImportedWallet is a regression test that ensures non-silent failure when importing to non-imported wallets -func TestImport_NonImportedWallet(t *testing.T) { - walletDir, passwordsDir, passwordFilePath := setupWalletAndPasswordsDir(t) - keysDir := filepath.Join(t.TempDir(), "keysDir") - require.NoError(t, os.MkdirAll(keysDir, os.ModePerm)) - - cliCtx := setupWalletCtx(t, &testWalletConfig{ - walletDir: walletDir, - passwordsDir: passwordsDir, - keysDir: keysDir, - keymanagerKind: keymanager.Derived, - walletPasswordFile: passwordFilePath, - }) - _, err := CreateWalletWithKeymanager(cliCtx.Context, &CreateWalletConfig{ - WalletCfg: &wallet.Config{ - WalletDir: walletDir, - KeymanagerKind: keymanager.Derived, - WalletPassword: password, - }, - }) - require.NoError(t, err) - - // Create a key - createKeystore(t, keysDir) - require.ErrorContains(t, "only imported wallets", ImportAccountsCli(cliCtx)) -} - func TestImport_Noninteractive_RandomName(t *testing.T) { imported.ResetCaches() walletDir, passwordsDir, passwordFilePath := setupWalletAndPasswordsDir(t) diff --git a/validator/accounts/accounts_list_test.go b/validator/accounts/accounts_list_test.go index 8b2c130688..0989137f4a 100644 --- a/validator/accounts/accounts_list_test.go +++ b/validator/accounts/accounts_list_test.go @@ -96,7 +96,8 @@ func TestListAccounts_ImportedKeymanager(t *testing.T) { for i := 0; i < numAccounts; i++ { keystores[i] = createRandomKeystore(t, password) } - require.NoError(t, km.ImportKeystores(cliCtx.Context, keystores, password)) + _, err = km.ImportKeystores(cliCtx.Context, keystores, []string{password}) + require.NoError(t, err) rescueStdout := os.Stdout r, writer, err := os.Pipe() diff --git a/validator/keymanager/derived/keymanager.go 
b/validator/keymanager/derived/keymanager.go index f2d44959fa..5565733dea 100644 --- a/validator/keymanager/derived/keymanager.go +++ b/validator/keymanager/derived/keymanager.go @@ -107,6 +107,13 @@ func (km *Keymanager) FetchValidatingPrivateKeys(ctx context.Context) ([][32]byt return km.importedKM.FetchValidatingPrivateKeys(ctx) } +// ImportKeystores for a derived keymanager. +func (km *Keymanager) ImportKeystores( + ctx context.Context, keystores []*keymanager.Keystore, passwords []string, +) ([]*ethpbservice.ImportedKeystoreStatus, error) { + return km.importedKM.ImportKeystores(ctx, keystores, passwords) +} + // DeleteKeystores for a derived keymanager. func (km *Keymanager) DeleteKeystores( ctx context.Context, publicKeys [][]byte, diff --git a/validator/keymanager/imported/BUILD.bazel b/validator/keymanager/imported/BUILD.bazel index abd80be186..dac687243f 100644 --- a/validator/keymanager/imported/BUILD.bazel +++ b/validator/keymanager/imported/BUILD.bazel @@ -24,7 +24,6 @@ go_library( "//crypto/bls:go_default_library", "//encoding/bytesutil:go_default_library", "//io/file:go_default_library", - "//io/prompt:go_default_library", "//proto/eth/service:go_default_library", "//proto/prysm/v1alpha1/validator-client:go_default_library", "//runtime/interop:go_default_library", diff --git a/validator/keymanager/imported/delete.go b/validator/keymanager/imported/delete.go index 83f0a9cefc..e50f81a2e0 100644 --- a/validator/keymanager/imported/delete.go +++ b/validator/keymanager/imported/delete.go @@ -89,5 +89,8 @@ func (km *Keymanager) DeleteKeystores( if err != nil { return nil, errors.Wrap(err, "failed to initialize key caches") } + log.WithFields(logrus.Fields{ + "publicKeys": deletedKeysStr, + }).Info("Successfully deleted validator key(s)") return statuses, nil } diff --git a/validator/keymanager/imported/delete_test.go b/validator/keymanager/imported/delete_test.go index 534725aee3..2445e3d561 100644 --- a/validator/keymanager/imported/delete_test.go +++ 
b/validator/keymanager/imported/delete_test.go @@ -32,7 +32,8 @@ func TestImportedKeymanager_DeleteKeystores(t *testing.T) { for i := 0; i < numAccounts; i++ { keystores[i] = createRandomKeystore(t, password) } - require.NoError(t, dr.ImportKeystores(ctx, keystores, password)) + _, err := dr.ImportKeystores(ctx, keystores, []string{password}) + require.NoError(t, err) accounts, err := dr.FetchValidatingPublicKeys(ctx) require.NoError(t, err) require.Equal(t, numAccounts, len(accounts)) diff --git a/validator/keymanager/imported/import.go b/validator/keymanager/imported/import.go index 6d5fe4ebc5..9a78ebdd7d 100644 --- a/validator/keymanager/imported/import.go +++ b/validator/keymanager/imported/import.go @@ -10,7 +10,7 @@ import ( "github.com/k0kubun/go-ansi" "github.com/pkg/errors" "github.com/prysmaticlabs/prysm/crypto/bls" - "github.com/prysmaticlabs/prysm/io/prompt" + ethpbservice "github.com/prysmaticlabs/prysm/proto/eth/service" "github.com/prysmaticlabs/prysm/validator/keymanager" "github.com/schollz/progressbar/v3" keystorev4 "github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4" @@ -20,26 +20,57 @@ import ( func (km *Keymanager) ImportKeystores( ctx context.Context, keystores []*keymanager.Keystore, - importsPassword string, -) error { + passwords []string, +) ([]*ethpbservice.ImportedKeystoreStatus, error) { + + var singlePasswordForAll string + if len(passwords) == 0 { + return nil, errors.New("no passwords provided for keystores") + } else if len(passwords) == 1 { + singlePasswordForAll = passwords[0] + } else { + if len(passwords) != len(keystores) { + return nil, errors.New("number of passwords does not match number of keystores") + } + } + decryptor := keystorev4.New() bar := initializeProgressBar(len(keystores), "Importing accounts...") keys := map[string]string{} + statuses := make([]*ethpbservice.ImportedKeystoreStatus, len(keystores)) var err error + for i := 0; i < len(keystores); i++ { var privKeyBytes []byte var pubKeyBytes []byte - 
privKeyBytes, pubKeyBytes, importsPassword, err = km.attemptDecryptKeystore(decryptor, keystores[i], importsPassword) + var passwordToUse string + if singlePasswordForAll != "" { + passwordToUse = singlePasswordForAll + } else { + passwordToUse = passwords[i] + } + privKeyBytes, pubKeyBytes, _, err = km.attemptDecryptKeystore(decryptor, keystores[i], passwordToUse) if err != nil { - return err + statuses[i] = ðpbservice.ImportedKeystoreStatus{ + Status: ethpbservice.ImportedKeystoreStatus_ERROR, + Message: err.Error(), + } + continue + } + if err := bar.Add(1); err != nil { + log.Error(err) } // if key exists prior to being added then output log that duplicate key was found if _, ok := keys[string(pubKeyBytes)]; ok { - log.Warnf("Duplicate key in import folder will be ignored: %#x", pubKeyBytes) + log.Warnf("Duplicate key in import will be ignored: %#x", pubKeyBytes) + statuses[i] = ðpbservice.ImportedKeystoreStatus{ + Status: ethpbservice.ImportedKeystoreStatus_DUPLICATE, + } + continue } keys[string(pubKeyBytes)] = string(privKeyBytes) - if err := bar.Add(1); err != nil { - return errors.Wrap(err, "could not add to progress bar") + statuses[i] = ðpbservice.ImportedKeystoreStatus{ + Status: ethpbservice.ImportedKeystoreStatus_IMPORTED, } } privKeys := make([][]byte, 0) @@ -52,13 +83,16 @@ func (km *Keymanager) ImportKeystores( // Write the accounts to disk into a single keystore. accountsKeystore, err := km.CreateAccountsKeystore(ctx, privKeys, pubKeys) if err != nil { - return err + return nil, err } encodedAccounts, err := json.MarshalIndent(accountsKeystore, "", "\t") if err != nil { - return err + return nil, err } - return km.wallet.WriteFileAtPath(ctx, AccountsPath, AccountsKeystoreFileName, encodedAccounts) + if err := km.wallet.WriteFileAtPath(ctx, AccountsPath, AccountsKeystoreFileName, encodedAccounts); err != nil { + return nil, err + } + return statuses, nil } // ImportKeypairs directly into the keymanager. 
@@ -86,18 +120,11 @@ func (km *Keymanager) attemptDecryptKeystore( var err error privKeyBytes, err = enc.Decrypt(keystore.Crypto, password) doesNotDecrypt := err != nil && strings.Contains(err.Error(), keymanager.IncorrectPasswordErrMsg) - for doesNotDecrypt { - password, err = prompt.PasswordPrompt( - fmt.Sprintf("Password incorrect for key 0x%s, input correct password", keystore.Pubkey), prompt.NotEmpty, + if doesNotDecrypt { + return nil, nil, "", fmt.Errorf( + "incorrect password for key 0x%s", + keystore.Pubkey, ) - if err != nil { - return nil, nil, "", fmt.Errorf("could not read keystore password: %w", err) - } - privKeyBytes, err = enc.Decrypt(keystore.Crypto, password) - doesNotDecrypt = err != nil && strings.Contains(err.Error(), keymanager.IncorrectPasswordErrMsg) - if err != nil && !strings.Contains(err.Error(), keymanager.IncorrectPasswordErrMsg) { - return nil, nil, "", errors.Wrap(err, "could not decrypt keystore") - } } if err != nil && !strings.Contains(err.Error(), keymanager.IncorrectPasswordErrMsg) { return nil, nil, "", errors.Wrap(err, "could not decrypt keystore") diff --git a/validator/keymanager/imported/import_test.go b/validator/keymanager/imported/import_test.go index cc4a0e2fe3..909d6e5100 100644 --- a/validator/keymanager/imported/import_test.go +++ b/validator/keymanager/imported/import_test.go @@ -2,18 +2,17 @@ package imported import ( "context" - "encoding/json" "fmt" - "strings" + "strconv" "testing" "github.com/google/uuid" "github.com/prysmaticlabs/prysm/crypto/bls" + ethpbservice "github.com/prysmaticlabs/prysm/proto/eth/service" "github.com/prysmaticlabs/prysm/testing/assert" "github.com/prysmaticlabs/prysm/testing/require" mock "github.com/prysmaticlabs/prysm/validator/accounts/testing" "github.com/prysmaticlabs/prysm/validator/keymanager" - logTest "github.com/sirupsen/logrus/hooks/test" keystorev4 "github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4" ) @@ -37,7 +36,7 @@ func createRandomKeystore(t testing.TB, password 
string) *keymanager.Keystore { } } -func TestImportedKeymanager_CreateAccountsKeystore_NoDuplicates(t *testing.T) { +func TestImportedKeymanager_NoDuplicates(t *testing.T) { numKeys := 50 pubKeys := make([][]byte, numKeys) privKeys := make([][]byte, numKeys) @@ -95,6 +94,7 @@ func TestImportedKeymanager_CreateAccountsKeystore_NoDuplicates(t *testing.T) { } func TestImportedKeymanager_ImportKeystores(t *testing.T) { + ctx := context.Background() // Setup the keymanager. wallet := &mock.Wallet{ Files: make(map[string]map[string][]byte), @@ -105,50 +105,99 @@ func TestImportedKeymanager_ImportKeystores(t *testing.T) { accountsStore: &accountStore{}, } - // Create a duplicate keystore and attempt to import it. This should complete correctly though log specific output. - numAccounts := 5 - keystores := make([]*keymanager.Keystore, numAccounts+1) - for i := 1; i < numAccounts+1; i++ { - keystores[i] = createRandomKeystore(t, password) - } - keystores[0] = keystores[1] - ctx := context.Background() - hook := logTest.NewGlobal() - require.NoError(t, dr.ImportKeystores( - ctx, - keystores, - password, - )) - require.LogsContain(t, hook, "Duplicate key") - // Import them correctly even without the duplicate. - require.NoError(t, dr.ImportKeystores( - ctx, - keystores[1:], - password, - )) - - // Ensure the single, all-encompassing accounts keystore was written - // to the wallet and ensure we can decrypt it using the EIP-2335 standard. 
- var encodedKeystore []byte - for k, v := range wallet.Files[AccountsPath] { - if strings.Contains(k, "keystore") { - encodedKeystore = v + t.Run("no passwords provided", func(t *testing.T) { + _, err := dr.ImportKeystores(ctx, nil, nil) + require.ErrorContains(t, "no passwords provided", err) + }) + t.Run("number of passwords does not match number of keystores", func(t *testing.T) { + _, err := dr.ImportKeystores( + ctx, + []*keymanager.Keystore{{}, {}}, + []string{"foo", "bar", "baz"}, + ) + require.ErrorContains(t, "number of passwords does not match", err) + }) + t.Run("single password used to decrypt all keystores", func(t *testing.T) { + numKeystores := 5 + keystores := make([]*keymanager.Keystore, numKeystores) + for i := 0; i < numKeystores; i++ { + keystores[i] = createRandomKeystore(t, password) } - } - require.NotNil(t, encodedKeystore, "could not find keystore file") - keystoreFile := &keymanager.Keystore{} - require.NoError(t, json.Unmarshal(encodedKeystore, keystoreFile)) + statuses, err := dr.ImportKeystores( + ctx, + keystores, + []string{password}, + ) + require.NoError(t, err) + require.Equal(t, numKeystores, len(statuses)) + for _, status := range statuses { + require.Equal(t, ethpbservice.ImportedKeystoreStatus_IMPORTED, status.Status) + } + }) + t.Run("each imported keystore with a different password succeeds", func(t *testing.T) { + numKeystores := 5 + keystores := make([]*keymanager.Keystore, numKeystores) + passwords := make([]string, numKeystores) + for i := 0; i < numKeystores; i++ { + pass := password + strconv.Itoa(i) + keystores[i] = createRandomKeystore(t, pass) + passwords[i] = pass + } + statuses, err := dr.ImportKeystores( + ctx, + keystores, + passwords, + ) + require.NoError(t, err) + require.Equal(t, numKeystores, len(statuses)) + for _, status := range statuses { + require.Equal(t, ethpbservice.ImportedKeystoreStatus_IMPORTED, status.Status) + } + }) + t.Run("some succeed, some fail to decrypt, some duplicated", func(t 
*testing.T) { + keystores := make([]*keymanager.Keystore, 0) + passwords := make([]string, 0) - // We decrypt the crypto fields of the accounts keystore. - decryptor := keystorev4.New() - encodedAccounts, err := decryptor.Decrypt(keystoreFile.Crypto, password) - require.NoError(t, err, "Could not decrypt validator accounts") - store := &accountStore{} - require.NoError(t, json.Unmarshal(encodedAccounts, store)) + // First keystore is normal. + keystore1 := createRandomKeystore(t, password) + keystores = append(keystores, keystore1) + passwords = append(passwords, password) - // We should have successfully imported all accounts - // from external sources into a single AccountsStore - // struct preserved within a single keystore file. - assert.Equal(t, numAccounts, len(store.PublicKeys)) - assert.Equal(t, numAccounts, len(store.PrivateKeys)) + // Second keystore is a duplicate of the first. + keystores = append(keystores, keystore1) + passwords = append(passwords, password) + + // Third keystore has a wrong password. 
+ keystore3 := createRandomKeystore(t, password) + keystores = append(keystores, keystore3) + passwords = append(passwords, "foobar") + + statuses, err := dr.ImportKeystores( + ctx, + keystores, + passwords, + ) + require.NoError(t, err) + require.Equal(t, len(keystores), len(statuses)) + require.Equal( + t, + ethpbservice.ImportedKeystoreStatus_IMPORTED, + statuses[0].Status, + ) + require.Equal( + t, + ethpbservice.ImportedKeystoreStatus_DUPLICATE, + statuses[1].Status, + ) + require.Equal( + t, + ethpbservice.ImportedKeystoreStatus_ERROR, + statuses[2].Status, + ) + require.Equal( + t, + fmt.Sprintf("incorrect password for key 0x%s", keystores[2].Pubkey), + statuses[2].Message, + ) + }) } diff --git a/validator/keymanager/imported/keymanager_test.go b/validator/keymanager/imported/keymanager_test.go index 194e11a0e5..40ea0c16de 100644 --- a/validator/keymanager/imported/keymanager_test.go +++ b/validator/keymanager/imported/keymanager_test.go @@ -99,7 +99,8 @@ func TestImportedKeymanager_Sign(t *testing.T) { for i := 0; i < numAccounts; i++ { keystores[i] = createRandomKeystore(t, password) } - require.NoError(t, dr.ImportKeystores(ctx, keystores, password)) + _, err := dr.ImportKeystores(ctx, keystores, []string{password}) + require.NoError(t, err) var encodedKeystore []byte for k, v := range wallet.Files[AccountsPath] { diff --git a/validator/keymanager/types.go b/validator/keymanager/types.go index 69fe29e392..3688cd98ed 100644 --- a/validator/keymanager/types.go +++ b/validator/keymanager/types.go @@ -35,7 +35,9 @@ type Signer interface { // Importer can import new keystores into the keymanager. type Importer interface { - ImportKeystores(ctx context.Context, keystores []*Keystore, importsPassword string) error + ImportKeystores( + ctx context.Context, keystores []*Keystore, passwords []string, + ) ([]*ethpbservice.ImportedKeystoreStatus, error) } // Deleter can delete keystores from the keymanager. 
diff --git a/validator/keymanager/types_test.go b/validator/keymanager/types_test.go index 71e765875e..82bfbe4703 100644 --- a/validator/keymanager/types_test.go +++ b/validator/keymanager/types_test.go @@ -16,6 +16,7 @@ var ( _ = keymanager.KeysFetcher(&imported.Keymanager{}) _ = keymanager.KeysFetcher(&derived.Keymanager{}) _ = keymanager.Importer(&imported.Keymanager{}) + _ = keymanager.Importer(&derived.Keymanager{}) _ = keymanager.Deleter(&imported.Keymanager{}) _ = keymanager.Deleter(&derived.Keymanager{}) ) diff --git a/validator/rpc/server.go b/validator/rpc/server.go index 032e9e160d..9dfff48b80 100644 --- a/validator/rpc/server.go +++ b/validator/rpc/server.go @@ -14,6 +14,7 @@ import ( "github.com/prysmaticlabs/prysm/async/event" "github.com/prysmaticlabs/prysm/io/logs" "github.com/prysmaticlabs/prysm/monitoring/tracing" + ethpbservice "github.com/prysmaticlabs/prysm/proto/eth/service" ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" pb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" validatorpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/validator-client" @@ -180,6 +181,7 @@ func (s *Server) Start() { validatorpb.RegisterHealthServer(s.grpcServer, s) validatorpb.RegisterBeaconServer(s.grpcServer, s) validatorpb.RegisterAccountsServer(s.grpcServer, s) + ethpbservice.RegisterKeyManagementServer(s.grpcServer, s) validatorpb.RegisterSlashingProtectionServer(s.grpcServer, s) go func() { diff --git a/validator/rpc/standard_api.go b/validator/rpc/standard_api.go index a5433eb5c7..10baf63609 100644 --- a/validator/rpc/standard_api.go +++ b/validator/rpc/standard_api.go @@ -1,6 +1,7 @@ package rpc import ( + "bytes" "context" "encoding/json" "fmt" @@ -39,6 +40,41 @@ func (s *Server) ListKeystores( }, nil } +// ImportKeystores allows for importing keystores into Prysm with their slashing protection history. 
+func (s *Server) ImportKeystores( + ctx context.Context, req *ethpbservice.ImportKeystoresRequest, +) (*ethpbservice.ImportKeystoresResponse, error) { + if !s.walletInitialized { + return nil, status.Error(codes.Internal, "Wallet not ready") + } + importer, ok := s.keymanager.(keymanager.Importer) + if !ok { + return nil, status.Error(codes.Internal, "Keymanager kind cannot import keys") + } + keystores := make([]*keymanager.Keystore, len(req.Keystores)) + for i := 0; i < len(req.Keystores); i++ { + k := &keymanager.Keystore{} + if err := json.Unmarshal([]byte(req.Keystores[i]), k); err != nil { + return nil, status.Errorf( + codes.Internal, "Invalid keystore at index %d in request: %v", i, err, + ) + } + keystores[i] = k + } + if req.SlashingProtection != "" { + if err := slashingprotection.ImportStandardProtectionJSON( + ctx, s.valDB, bytes.NewBuffer([]byte(req.SlashingProtection)), + ); err != nil { + return nil, status.Errorf(codes.Internal, "Could not import slashing protection JSON: %v", err) + } + } + statuses, err := importer.ImportKeystores(ctx, keystores, req.Passwords) + if err != nil { + return nil, status.Errorf(codes.Internal, "Could not import keystores: %v", err) + } + return ðpbservice.ImportKeystoresResponse{Statuses: statuses}, nil +} + // DeleteKeystores allows for deleting specified public keys from Prysm. 
func (s *Server) DeleteKeystores( ctx context.Context, req *ethpbservice.DeleteKeystoresRequest, diff --git a/validator/rpc/standard_api_test.go b/validator/rpc/standard_api_test.go index de717e0d5c..53e3f1854d 100644 --- a/validator/rpc/standard_api_test.go +++ b/validator/rpc/standard_api_test.go @@ -2,11 +2,15 @@ package rpc import ( "context" + "encoding/hex" "encoding/json" "fmt" "testing" "github.com/golang/protobuf/ptypes/empty" + "github.com/google/uuid" + "github.com/prysmaticlabs/prysm/crypto/bls" + "github.com/prysmaticlabs/prysm/encoding/bytesutil" ethpbservice "github.com/prysmaticlabs/prysm/proto/eth/service" validatorpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/validator-client" "github.com/prysmaticlabs/prysm/testing/require" @@ -17,6 +21,7 @@ import ( "github.com/prysmaticlabs/prysm/validator/keymanager" "github.com/prysmaticlabs/prysm/validator/keymanager/derived" mocks "github.com/prysmaticlabs/prysm/validator/testing" + keystorev4 "github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4" ) func TestServer_ListKeystores(t *testing.T) { @@ -76,6 +81,112 @@ func TestServer_ListKeystores(t *testing.T) { }) } +func TestServer_ImportKeystores(t *testing.T) { + t.Run("wallet not ready", func(t *testing.T) { + s := Server{} + _, err := s.ImportKeystores(context.Background(), nil) + require.ErrorContains(t, "Wallet not ready", err) + }) + + ctx := context.Background() + localWalletDir := setupWalletDir(t) + defaultWalletPath = localWalletDir + w, err := accounts.CreateWalletWithKeymanager(ctx, &accounts.CreateWalletConfig{ + WalletCfg: &wallet.Config{ + WalletDir: defaultWalletPath, + KeymanagerKind: keymanager.Derived, + WalletPassword: strongPass, + }, + SkipMnemonicConfirm: true, + }) + require.NoError(t, err) + km, err := w.InitializeKeymanager(ctx, iface.InitKeymanagerConfig{ListenForChanges: false}) + require.NoError(t, err) + + s := &Server{ + keymanager: km, + walletInitialized: true, + wallet: w, + } + t.Run("prevents importing if 
faulty keystore in request", func(t *testing.T) { + _, err := s.ImportKeystores(context.Background(), ðpbservice.ImportKeystoresRequest{ + Keystores: []string{"hi"}, + Passwords: []string{"hi"}, + }) + require.NotNil(t, err) + }) + t.Run("prevents importing if faulty slashing protection data", func(t *testing.T) { + numKeystores := 5 + password := "12345678" + encodedKeystores := make([]string, numKeystores) + for i := 0; i < numKeystores; i++ { + enc, err := json.Marshal(createRandomKeystore(t, password)) + encodedKeystores[i] = string(enc) + require.NoError(t, err) + } + _, err := s.ImportKeystores(context.Background(), ðpbservice.ImportKeystoresRequest{ + Keystores: encodedKeystores, + Passwords: []string{password}, + SlashingProtection: "foobar", + }) + require.NotNil(t, err) + }) + t.Run("returns proper statuses for keystores in request", func(t *testing.T) { + numKeystores := 5 + password := "12345678" + keystores := make([]*keymanager.Keystore, numKeystores) + publicKeys := make([][48]byte, numKeystores) + for i := 0; i < numKeystores; i++ { + keystores[i] = createRandomKeystore(t, password) + pubKey, err := hex.DecodeString(keystores[i].Pubkey) + require.NoError(t, err) + publicKeys[i] = bytesutil.ToBytes48(pubKey) + } + + // Create a validator database. + validatorDB, err := kv.NewKVStore(ctx, defaultWalletPath, &kv.Config{ + PubKeys: publicKeys, + }) + require.NoError(t, err) + s.valDB = validatorDB + + // Have to close it after import is done otherwise it complains db is not open. + defer func() { + require.NoError(t, validatorDB.Close()) + }() + encodedKeystores := make([]string, numKeystores) + for i := 0; i < numKeystores; i++ { + enc, err := json.Marshal(keystores[i]) + require.NoError(t, err) + encodedKeystores[i] = string(enc) + } + + // Generate mock slashing history. 
+ attestingHistory := make([][]*kv.AttestationRecord, 0) + proposalHistory := make([]kv.ProposalHistoryForPubkey, len(publicKeys)) + for i := 0; i < len(publicKeys); i++ { + proposalHistory[i].Proposals = make([]kv.Proposal, 0) + } + mockJSON, err := mocks.MockSlashingProtectionJSON(publicKeys, attestingHistory, proposalHistory) + require.NoError(t, err) + + // JSON encode the protection JSON and save it. + encodedSlashingProtection, err := json.Marshal(mockJSON) + require.NoError(t, err) + + resp, err := s.ImportKeystores(context.Background(), ðpbservice.ImportKeystoresRequest{ + Keystores: encodedKeystores, + Passwords: []string{password}, + SlashingProtection: string(encodedSlashingProtection), + }) + require.NoError(t, err) + require.Equal(t, numKeystores, len(resp.Statuses)) + for _, status := range resp.Statuses { + require.Equal(t, ethpbservice.ImportedKeystoreStatus_IMPORTED, status.Status) + } + }) +} + func TestServer_DeleteKeystores(t *testing.T) { ctx := context.Background() t.Run("wallet not ready", func(t *testing.T) { @@ -171,3 +282,21 @@ func TestServer_DeleteKeystores(t *testing.T) { } require.Equal(t, numAccounts, len(mockJSON.Data)) } + +func createRandomKeystore(t testing.TB, password string) *keymanager.Keystore { + encryptor := keystorev4.New() + id, err := uuid.NewRandom() + require.NoError(t, err) + validatingKey, err := bls.RandKey() + require.NoError(t, err) + pubKey := validatingKey.PublicKey().Marshal() + cryptoFields, err := encryptor.Encrypt(validatingKey.Marshal(), password) + require.NoError(t, err) + return &keymanager.Keystore{ + Crypto: cryptoFields, + Pubkey: fmt.Sprintf("%x", pubKey), + ID: id.String(), + Version: encryptor.Version(), + Name: encryptor.Name(), + } +} diff --git a/validator/rpc/wallet.go b/validator/rpc/wallet.go index 060de08645..043a3c3203 100644 --- a/validator/rpc/wallet.go +++ b/validator/rpc/wallet.go @@ -18,7 +18,6 @@ import ( "github.com/prysmaticlabs/prysm/validator/accounts/iface" 
"github.com/prysmaticlabs/prysm/validator/accounts/wallet" "github.com/prysmaticlabs/prysm/validator/keymanager" - "github.com/prysmaticlabs/prysm/validator/keymanager/imported" "github.com/tyler-smith/go-bip39" "github.com/tyler-smith/go-bip39/wordlists" keystorev4 "github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4" @@ -251,17 +250,17 @@ func (s *Server) ValidateKeystores( return &emptypb.Empty{}, nil } -// ImportKeystores allows importing new keystores via RPC into the wallet +// ImportAccounts allows importing new keystores via RPC into the wallet // which will be decrypted using the specified password . -func (s *Server) ImportKeystores( - ctx context.Context, req *pb.ImportKeystoresRequest, -) (*pb.ImportKeystoresResponse, error) { +func (s *Server) ImportAccounts( + ctx context.Context, req *pb.ImportAccountsRequest, +) (*pb.ImportAccountsResponse, error) { if s.wallet == nil { return nil, status.Error(codes.FailedPrecondition, "No wallet initialized") } - km, ok := s.keymanager.(*imported.Keymanager) + km, ok := s.keymanager.(keymanager.Importer) if !ok { - return nil, status.Error(codes.FailedPrecondition, "Only imported wallets can import more keystores") + return nil, status.Error(codes.FailedPrecondition, "Only imported wallets can import keystores") } if req.KeystoresPassword == "" { return nil, status.Error(codes.InvalidArgument, "Password required for keystores") @@ -286,15 +285,16 @@ func (s *Server) ImportKeystores( importedPubKeys[i] = pubKey } // Import the uploaded accounts. 
- if err := accounts.ImportAccounts(ctx, &accounts.ImportAccountsConfig{ - Keymanager: km, + _, err := accounts.ImportAccounts(ctx, &accounts.ImportAccountsConfig{ + Importer: km, Keystores: keystores, AccountPassword: req.KeystoresPassword, - }); err != nil { + }) + if err != nil { return nil, err } s.walletInitializedFeed.Send(s.wallet) - return &pb.ImportKeystoresResponse{ + return &pb.ImportAccountsResponse{ ImportedPublicKeys: importedPubKeys, }, nil } diff --git a/validator/rpc/wallet_test.go b/validator/rpc/wallet_test.go index 9e81c6d7b4..51c28c9d5d 100644 --- a/validator/rpc/wallet_test.go +++ b/validator/rpc/wallet_test.go @@ -47,11 +47,11 @@ func TestServer_CreateWallet_Imported(t *testing.T) { _, err := s.CreateWallet(ctx, req) require.NoError(t, err) - importReq := &pb.ImportKeystoresRequest{ + importReq := &pb.ImportAccountsRequest{ KeystoresPassword: strongPass, KeystoresImported: []string{"badjson"}, } - _, err = s.ImportKeystores(ctx, importReq) + _, err = s.ImportAccounts(ctx, importReq) require.ErrorContains(t, "Not a valid EIP-2335 keystore", err) encryptor := keystorev4.New() @@ -76,7 +76,7 @@ func TestServer_CreateWallet_Imported(t *testing.T) { keystores[i] = string(encodedFile) } importReq.KeystoresImported = keystores - _, err = s.ImportKeystores(ctx, importReq) + _, err = s.ImportAccounts(ctx, importReq) require.NoError(t, err) } @@ -311,30 +311,7 @@ func TestServer_WalletConfig(t *testing.T) { }) } -func TestServer_ImportKeystores_FailedPreconditions_WrongKeymanagerKind(t *testing.T) { - localWalletDir := setupWalletDir(t) - defaultWalletPath = localWalletDir - ctx := context.Background() - w, err := accounts.CreateWalletWithKeymanager(ctx, &accounts.CreateWalletConfig{ - WalletCfg: &wallet.Config{ - WalletDir: defaultWalletPath, - KeymanagerKind: keymanager.Derived, - WalletPassword: strongPass, - }, - SkipMnemonicConfirm: true, - }) - require.NoError(t, err) - km, err := w.InitializeKeymanager(ctx, 
iface.InitKeymanagerConfig{ListenForChanges: false}) - require.NoError(t, err) - ss := &Server{ - wallet: w, - keymanager: km, - } - _, err = ss.ImportKeystores(ctx, &pb.ImportKeystoresRequest{}) - assert.ErrorContains(t, "Only imported wallets can import more", err) -} - -func TestServer_ImportKeystores_FailedPreconditions(t *testing.T) { +func TestServer_ImportAccounts_FailedPreconditions(t *testing.T) { localWalletDir := setupWalletDir(t) defaultWalletPath = localWalletDir ctx := context.Background() @@ -352,23 +329,23 @@ func TestServer_ImportKeystores_FailedPreconditions(t *testing.T) { ss := &Server{ keymanager: km, } - _, err = ss.ImportKeystores(ctx, &pb.ImportKeystoresRequest{}) + _, err = ss.ImportAccounts(ctx, &pb.ImportAccountsRequest{}) assert.ErrorContains(t, "No wallet initialized", err) ss.wallet = w - _, err = ss.ImportKeystores(ctx, &pb.ImportKeystoresRequest{}) + _, err = ss.ImportAccounts(ctx, &pb.ImportAccountsRequest{}) assert.ErrorContains(t, "Password required for keystores", err) - _, err = ss.ImportKeystores(ctx, &pb.ImportKeystoresRequest{ + _, err = ss.ImportAccounts(ctx, &pb.ImportAccountsRequest{ KeystoresPassword: strongPass, }) assert.ErrorContains(t, "No keystores included for import", err) - _, err = ss.ImportKeystores(ctx, &pb.ImportKeystoresRequest{ + _, err = ss.ImportAccounts(ctx, &pb.ImportAccountsRequest{ KeystoresPassword: strongPass, KeystoresImported: []string{"badjson"}, }) assert.ErrorContains(t, "Not a valid EIP-2335 keystore", err) } -func TestServer_ImportKeystores_OK(t *testing.T) { +func TestServer_ImportAccounts_OK(t *testing.T) { imported.ResetCaches() localWalletDir := setupWalletDir(t) defaultWalletPath = localWalletDir @@ -421,12 +398,12 @@ func TestServer_ImportKeystores_OK(t *testing.T) { assert.Equal(t, 0, len(keys)) // Import the 3 keystores and verify the wallet has 3 new accounts. 
- res, err := ss.ImportKeystores(ctx, &pb.ImportKeystoresRequest{ + res, err := ss.ImportAccounts(ctx, &pb.ImportAccountsRequest{ KeystoresPassword: strongPass, KeystoresImported: keystores, }) require.NoError(t, err) - assert.DeepEqual(t, &pb.ImportKeystoresResponse{ + assert.DeepEqual(t, &pb.ImportAccountsResponse{ ImportedPublicKeys: pubKeys, }, res) @@ -510,7 +487,7 @@ func createImportedWalletWithAccounts(t testing.TB, numAccounts int) (*Server, [ keystores[i] = string(encodedFile) pubKeys[i] = privKey.PublicKey().Marshal() } - _, err = s.ImportKeystores(ctx, &pb.ImportKeystoresRequest{ + _, err = s.ImportAccounts(ctx, &pb.ImportAccountsRequest{ KeystoresImported: keystores, KeystoresPassword: strongPass, }) From fed004686be118c25ccd600e45f79d08063de352 Mon Sep 17 00:00:00 2001 From: Potuz Date: Wed, 24 Nov 2021 16:35:45 -0300 Subject: [PATCH 23/45] Add verbosity to aggregation logs (#9937) --- beacon-chain/monitor/process_attestation.go | 9 ++++++++- beacon-chain/monitor/process_attestation_test.go | 4 ++-- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/beacon-chain/monitor/process_attestation.go b/beacon-chain/monitor/process_attestation.go index 77a2f9dae7..70a1fa0e0e 100644 --- a/beacon-chain/monitor/process_attestation.go +++ b/beacon-chain/monitor/process_attestation.go @@ -197,7 +197,14 @@ func (s *Service) processAggregatedAttestation(ctx context.Context, att *ethpb.A defer s.Unlock() if s.trackedIndex(att.AggregatorIndex) { log.WithFields(logrus.Fields{ - "ValidatorIndex": att.AggregatorIndex, + "AggregatorIndex": att.AggregatorIndex, + "Slot": att.Aggregate.Data.Slot, + "BeaconBlockRoot": fmt.Sprintf("%#x", bytesutil.Trunc( + att.Aggregate.Data.BeaconBlockRoot)), + "SourceRoot:": fmt.Sprintf("%#x", bytesutil.Trunc( + att.Aggregate.Data.Source.Root)), + "TargetRoot:": fmt.Sprintf("%#x", bytesutil.Trunc( + att.Aggregate.Data.Target.Root)), }).Info("Processed attestation aggregation") aggregatedPerf := 
s.aggregatedPerformance[att.AggregatorIndex] aggregatedPerf.totalAggregations++ diff --git a/beacon-chain/monitor/process_attestation_test.go b/beacon-chain/monitor/process_attestation_test.go index 0ff19e2ba1..6aca6b2896 100644 --- a/beacon-chain/monitor/process_attestation_test.go +++ b/beacon-chain/monitor/process_attestation_test.go @@ -211,7 +211,7 @@ func TestProcessAggregatedAttestationStateNotCached(t *testing.T) { }, } s.processAggregatedAttestation(ctx, att) - require.LogsContain(t, hook, "\"Processed attestation aggregation\" ValidatorIndex=2 prefix=monitor") + require.LogsContain(t, hook, "\"Processed attestation aggregation\" AggregatorIndex=2 BeaconBlockRoot=0x000000000000 Slot=1 SourceRoot:=0x68656c6c6f2d TargetRoot:=0x68656c6c6f2d prefix=monitor") require.LogsContain(t, hook, "Skipping agregated attestation due to state not found in cache") logrus.SetLevel(logrus.InfoLevel) } @@ -249,7 +249,7 @@ func TestProcessAggregatedAttestationStateCached(t *testing.T) { require.NoError(t, s.config.StateGen.SaveState(ctx, root, state)) s.processAggregatedAttestation(ctx, att) - require.LogsContain(t, hook, "\"Processed attestation aggregation\" ValidatorIndex=2 prefix=monitor") + require.LogsContain(t, hook, "\"Processed attestation aggregation\" AggregatorIndex=2 BeaconBlockRoot=0x68656c6c6f2d Slot=1 SourceRoot:=0x68656c6c6f2d TargetRoot:=0x68656c6c6f2d prefix=monitor") require.LogsContain(t, hook, "\"Processed aggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=2 prefix=monitor") require.LogsDoNotContain(t, hook, "\"Processed aggregated attestation\" Head=0x68656c6c6f2d Slot=1 Source=0x68656c6c6f2d Target=0x68656c6c6f2d ValidatorIndex=12 prefix=monitor") } From c9d5b4ba0e0069338dbe56b705009054c2417e57 Mon Sep 17 00:00:00 2001 From: terence tsao Date: Wed, 24 Nov 2021 14:26:17 -0800 Subject: [PATCH 24/45] Add merge beacon block wrappers (#9906) --- .../prysm/v1alpha1/block/block_interfaces.go | 2 + 
proto/prysm/v1alpha1/cloners.go | 68 ++++ proto/prysm/v1alpha1/cloners_test.go | 81 +++++ proto/prysm/v1alpha1/wrapper/beacon_block.go | 288 +++++++++++++++- .../v1alpha1/wrapper/beacon_block_test.go | 317 +++++++++++++++++- testing/util/block.go | 65 ++++ 6 files changed, 814 insertions(+), 7 deletions(-) diff --git a/proto/prysm/v1alpha1/block/block_interfaces.go b/proto/prysm/v1alpha1/block/block_interfaces.go index 37287d7901..cc8a067612 100644 --- a/proto/prysm/v1alpha1/block/block_interfaces.go +++ b/proto/prysm/v1alpha1/block/block_interfaces.go @@ -17,6 +17,7 @@ type SignedBeaconBlock interface { Proto() proto.Message PbPhase0Block() (*ethpb.SignedBeaconBlock, error) PbAltairBlock() (*ethpb.SignedBeaconBlockAltair, error) + PbMergeBlock() (*ethpb.SignedBeaconBlockMerge, error) ssz.Marshaler ssz.Unmarshaler Version() int @@ -54,4 +55,5 @@ type BeaconBlockBody interface { IsNil() bool HashTreeRoot() ([32]byte, error) Proto() proto.Message + ExecutionPayload() (*ethpb.ExecutionPayload, error) } diff --git a/proto/prysm/v1alpha1/cloners.go b/proto/prysm/v1alpha1/cloners.go index f6710e7bf1..47d597f3a1 100644 --- a/proto/prysm/v1alpha1/cloners.go +++ b/proto/prysm/v1alpha1/cloners.go @@ -377,6 +377,74 @@ func CopySyncAggregate(a *SyncAggregate) *SyncAggregate { } } +// CopySignedBeaconBlockMerge copies the provided SignedBeaconBlockMerge. +func CopySignedBeaconBlockMerge(sigBlock *SignedBeaconBlockMerge) *SignedBeaconBlockMerge { + if sigBlock == nil { + return nil + } + return &SignedBeaconBlockMerge{ + Block: CopyBeaconBlockMerge(sigBlock.Block), + Signature: bytesutil.SafeCopyBytes(sigBlock.Signature), + } +} + +// CopyBeaconBlockMerge copies the provided BeaconBlockMerge. 
+func CopyBeaconBlockMerge(block *BeaconBlockMerge) *BeaconBlockMerge { + if block == nil { + return nil + } + return &BeaconBlockMerge{ + Slot: block.Slot, + ProposerIndex: block.ProposerIndex, + ParentRoot: bytesutil.SafeCopyBytes(block.ParentRoot), + StateRoot: bytesutil.SafeCopyBytes(block.StateRoot), + Body: CopyBeaconBlockBodyMerge(block.Body), + } +} + +// CopyBeaconBlockBodyMerge copies the provided BeaconBlockBodyMerge. +func CopyBeaconBlockBodyMerge(body *BeaconBlockBodyMerge) *BeaconBlockBodyMerge { + if body == nil { + return nil + } + return &BeaconBlockBodyMerge{ + RandaoReveal: bytesutil.SafeCopyBytes(body.RandaoReveal), + Eth1Data: CopyETH1Data(body.Eth1Data), + Graffiti: bytesutil.SafeCopyBytes(body.Graffiti), + ProposerSlashings: CopyProposerSlashings(body.ProposerSlashings), + AttesterSlashings: CopyAttesterSlashings(body.AttesterSlashings), + Attestations: CopyAttestations(body.Attestations), + Deposits: CopyDeposits(body.Deposits), + VoluntaryExits: CopySignedVoluntaryExits(body.VoluntaryExits), + SyncAggregate: CopySyncAggregate(body.SyncAggregate), + ExecutionPayload: CopyExecutionPayload(body.ExecutionPayload), + } +} + +// CopyExecutionPayload copies the provided ApplicationPayload. 
+func CopyExecutionPayload(payload *ExecutionPayload) *ExecutionPayload { + if payload == nil { + return nil + } + + return &ExecutionPayload{ + ParentHash: bytesutil.SafeCopyBytes(payload.ParentHash), + FeeRecipient: bytesutil.SafeCopyBytes(payload.FeeRecipient), + StateRoot: bytesutil.SafeCopyBytes(payload.StateRoot), + ReceiptRoot: bytesutil.SafeCopyBytes(payload.ReceiptRoot), + LogsBloom: bytesutil.SafeCopyBytes(payload.LogsBloom), + Random: bytesutil.SafeCopyBytes(payload.Random), + BlockNumber: payload.BlockNumber, + GasLimit: payload.GasLimit, + GasUsed: payload.GasUsed, + Timestamp: payload.Timestamp, + ExtraData: bytesutil.SafeCopyBytes(payload.ExtraData), + BaseFeePerGas: bytesutil.SafeCopyBytes(payload.BaseFeePerGas), + BlockHash: bytesutil.SafeCopyBytes(payload.BlockHash), + Transactions: bytesutil.SafeCopy2dBytes(payload.Transactions), + } +} + // CopyExecutionPayloadHeader copies the provided execution payload object. func CopyExecutionPayloadHeader(payload *ExecutionPayloadHeader) *ExecutionPayloadHeader { if payload == nil { diff --git a/proto/prysm/v1alpha1/cloners_test.go b/proto/prysm/v1alpha1/cloners_test.go index 6368036720..c14e0a2cbe 100644 --- a/proto/prysm/v1alpha1/cloners_test.go +++ b/proto/prysm/v1alpha1/cloners_test.go @@ -318,6 +318,36 @@ func TestCopyPayloadHeader(t *testing.T) { assert.NotEmpty(t, got, "Copied execution payload header has empty fields") } +func TestCopySignedBeaconBlockMerge(t *testing.T) { + sbb := genSignedBeaconBlockMerge() + + got := v1alpha1.CopySignedBeaconBlockMerge(sbb) + if !reflect.DeepEqual(got, sbb) { + t.Errorf("CopySignedBeaconBlockMerge() = %v, want %v", got, sbb) + } + assert.NotEmpty(t, sbb, "Copied signed beacon block Merge has empty fields") +} + +func TestCopyBeaconBlockMerge(t *testing.T) { + b := genBeaconBlockMerge() + + got := v1alpha1.CopyBeaconBlockMerge(b) + if !reflect.DeepEqual(got, b) { + t.Errorf("CopyBeaconBlockMerge() = %v, want %v", got, b) + } + assert.NotEmpty(t, b, "Copied beacon 
block Merge has empty fields") +} + +func TestCopyBeaconBlockBodyMerge(t *testing.T) { + bb := genBeaconBlockBodyMerge() + + got := v1alpha1.CopyBeaconBlockBodyMerge(bb) + if !reflect.DeepEqual(got, bb) { + t.Errorf("CopyBeaconBlockBodyMerge() = %v, want %v", got, bb) + } + assert.NotEmpty(t, bb, "Copied beacon block body Merge has empty fields") +} + func bytes() []byte { b := make([]byte, 32) _, err := rand.Read(b) @@ -574,6 +604,38 @@ func genSignedBeaconBlockAltair() *v1alpha1.SignedBeaconBlockAltair { } } +func genBeaconBlockBodyMerge() *v1alpha1.BeaconBlockBodyMerge { + return &v1alpha1.BeaconBlockBodyMerge{ + RandaoReveal: bytes(), + Eth1Data: genEth1Data(), + Graffiti: bytes(), + ProposerSlashings: genProposerSlashings(5), + AttesterSlashings: genAttesterSlashings(5), + Attestations: genAttestations(10), + Deposits: genDeposits(5), + VoluntaryExits: genSignedVoluntaryExits(12), + SyncAggregate: genSyncAggregate(), + ExecutionPayload: genPayload(), + } +} + +func genBeaconBlockMerge() *v1alpha1.BeaconBlockMerge { + return &v1alpha1.BeaconBlockMerge{ + Slot: 123455, + ProposerIndex: 55433, + ParentRoot: bytes(), + StateRoot: bytes(), + Body: genBeaconBlockBodyMerge(), + } +} + +func genSignedBeaconBlockMerge() *v1alpha1.SignedBeaconBlockMerge { + return &v1alpha1.SignedBeaconBlockMerge{ + Block: genBeaconBlockMerge(), + Signature: bytes(), + } +} + func genSyncCommitteeMessage() *v1alpha1.SyncCommitteeMessage { return &v1alpha1.SyncCommitteeMessage{ Slot: 424555, @@ -583,6 +645,25 @@ func genSyncCommitteeMessage() *v1alpha1.SyncCommitteeMessage { } } +func genPayload() *v1alpha1.ExecutionPayload { + return &v1alpha1.ExecutionPayload{ + ParentHash: bytes(), + FeeRecipient: bytes(), + StateRoot: bytes(), + ReceiptRoot: bytes(), + LogsBloom: bytes(), + Random: bytes(), + BlockNumber: 1, + GasLimit: 2, + GasUsed: 3, + Timestamp: 4, + ExtraData: bytes(), + BaseFeePerGas: bytes(), + BlockHash: bytes(), + Transactions: [][]byte{{'a'}, {'b'}, {'c'}}, + } +} + func 
genPayloadHeader() *v1alpha1.ExecutionPayloadHeader { return &v1alpha1.ExecutionPayloadHeader{ ParentHash: bytes(), diff --git a/proto/prysm/v1alpha1/wrapper/beacon_block.go b/proto/prysm/v1alpha1/wrapper/beacon_block.go index 830bc71ce5..384d2b7458 100644 --- a/proto/prysm/v1alpha1/wrapper/beacon_block.go +++ b/proto/prysm/v1alpha1/wrapper/beacon_block.go @@ -84,11 +84,16 @@ func (w Phase0SignedBeaconBlock) PbPhase0Block() (*eth.SignedBeaconBlock, error) return w.b, nil } -// PbAltairBlock returns the underlying protobuf object. +// PbAltairBlock is a stub. func (w Phase0SignedBeaconBlock) PbAltairBlock() (*eth.SignedBeaconBlockAltair, error) { return nil, errors.New("unsupported altair block") } +// PbMergeBlock is a stub. +func (w Phase0SignedBeaconBlock) PbMergeBlock() (*eth.SignedBeaconBlockMerge, error) { + return nil, errors.New("unsupported merge block") +} + // Version of the underlying protobuf object. func (w Phase0SignedBeaconBlock) Version() int { return version.Phase0 @@ -264,6 +269,11 @@ func (w Phase0BeaconBlockBody) Proto() proto.Message { return w.b } +// ExecutionPayload is a stub. +func (w Phase0BeaconBlockBody) ExecutionPayload() (*eth.ExecutionPayload, error) { + return nil, errors.New("ExecutionPayload is not supported in phase 0 block body") +} + var ( // ErrUnsupportedPhase0Block is returned when accessing a phase0 block from an altair wrapped // block. @@ -350,6 +360,11 @@ func (w altairSignedBeaconBlock) PbPhase0Block() (*eth.SignedBeaconBlock, error) return nil, ErrUnsupportedPhase0Block } +// PbMergeBlock is a stub. +func (w altairSignedBeaconBlock) PbMergeBlock() (*eth.SignedBeaconBlockMerge, error) { + return nil, errors.New("unsupported merge block") +} + // Version of the underlying protobuf object. 
func (w altairSignedBeaconBlock) Version() int { return version.Altair @@ -532,3 +547,274 @@ func (w altairBeaconBlockBody) HashTreeRoot() ([32]byte, error) { func (w altairBeaconBlockBody) Proto() proto.Message { return w.b } + +// ExecutionPayload is a stub. +func (w altairBeaconBlockBody) ExecutionPayload() (*eth.ExecutionPayload, error) { + return nil, errors.New("ExecutionPayload is not supported in altair block body") +} + +// mergeSignedBeaconBlock is a convenience wrapper around a merge beacon block +// object. This wrapper allows us to conform to a common interface so that beacon +// blocks for future forks can also be applied across prysm without issues. +type mergeSignedBeaconBlock struct { + b *eth.SignedBeaconBlockMerge +} + +// WrappedMergeSignedBeaconBlock is constructor which wraps a protobuf merge block with the block wrapper. +func WrappedMergeSignedBeaconBlock(b *eth.SignedBeaconBlockMerge) (block.SignedBeaconBlock, error) { + w := mergeSignedBeaconBlock{b: b} + if w.IsNil() { + return nil, ErrNilObjectWrapped + } + return w, nil +} + +// Signature returns the respective block signature. +func (w mergeSignedBeaconBlock) Signature() []byte { + return w.b.Signature +} + +// Block returns the underlying beacon block object. +func (w mergeSignedBeaconBlock) Block() block.BeaconBlock { + return mergeBeaconBlock{b: w.b.Block} +} + +// IsNil checks if the underlying beacon block is nil. +func (w mergeSignedBeaconBlock) IsNil() bool { + return w.b == nil || w.b.Block == nil +} + +// Copy performs a deep copy of the signed beacon block object. +func (w mergeSignedBeaconBlock) Copy() block.SignedBeaconBlock { + return mergeSignedBeaconBlock{b: eth.CopySignedBeaconBlockMerge(w.b)} +} + +// MarshalSSZ marshals the signed beacon block to its relevant ssz form. +func (w mergeSignedBeaconBlock) MarshalSSZ() ([]byte, error) { + return w.b.MarshalSSZ() +} + +// MarshalSSZTo marshals the signed beacon block to its relevant ssz +// form to the provided byte buffer. 
+func (w mergeSignedBeaconBlock) MarshalSSZTo(dst []byte) ([]byte, error) { + return w.b.MarshalSSZTo(dst) +} + +// SizeSSZ returns the size of serialized signed block +func (w mergeSignedBeaconBlock) SizeSSZ() int { + return w.b.SizeSSZ() +} + +// UnmarshalSSZ unmarshalls the signed beacon block from its relevant ssz +// form. +func (w mergeSignedBeaconBlock) UnmarshalSSZ(buf []byte) error { + return w.b.UnmarshalSSZ(buf) +} + +// Proto returns the block in its underlying protobuf interface. +func (w mergeSignedBeaconBlock) Proto() proto.Message { + return w.b +} + +// PbMergeBlock returns the underlying protobuf object. +func (w mergeSignedBeaconBlock) PbMergeBlock() (*eth.SignedBeaconBlockMerge, error) { + return w.b, nil +} + +// PbPhase0Block is a stub. +func (w mergeSignedBeaconBlock) PbPhase0Block() (*eth.SignedBeaconBlock, error) { + return nil, ErrUnsupportedPhase0Block +} + +// PbAltairBlock returns the underlying protobuf object. +func (w mergeSignedBeaconBlock) PbAltairBlock() (*eth.SignedBeaconBlockAltair, error) { + return nil, errors.New("unsupported altair block") +} + +// Version of the underlying protobuf object. +func (w mergeSignedBeaconBlock) Version() int { + return version.Merge +} + +func (w mergeSignedBeaconBlock) Header() (*eth.SignedBeaconBlockHeader, error) { + root, err := w.b.Block.Body.HashTreeRoot() + if err != nil { + return nil, errors.Wrapf(err, "could not hash block") + } + + return ð.SignedBeaconBlockHeader{ + Header: ð.BeaconBlockHeader{ + Slot: w.b.Block.Slot, + ProposerIndex: w.b.Block.ProposerIndex, + ParentRoot: w.b.Block.ParentRoot, + StateRoot: w.b.Block.StateRoot, + BodyRoot: root[:], + }, + Signature: w.Signature(), + }, nil +} + +// mergeBeaconBlock is the wrapper for the actual block. +type mergeBeaconBlock struct { + b *eth.BeaconBlockMerge +} + +// WrappedMergeBeaconBlock is constructor which wraps a protobuf merge object +// with the block wrapper. 
+func WrappedMergeBeaconBlock(b *eth.BeaconBlockMerge) (block.BeaconBlock, error) { + w := mergeBeaconBlock{b: b} + if w.IsNil() { + return nil, ErrNilObjectWrapped + } + return w, nil +} + +// Slot returns the respective slot of the block. +func (w mergeBeaconBlock) Slot() types.Slot { + return w.b.Slot +} + +// ProposerIndex returns proposer index of the beacon block. +func (w mergeBeaconBlock) ProposerIndex() types.ValidatorIndex { + return w.b.ProposerIndex +} + +// ParentRoot returns the parent root of beacon block. +func (w mergeBeaconBlock) ParentRoot() []byte { + return w.b.ParentRoot +} + +// StateRoot returns the state root of the beacon block. +func (w mergeBeaconBlock) StateRoot() []byte { + return w.b.StateRoot +} + +// Body returns the underlying block body. +func (w mergeBeaconBlock) Body() block.BeaconBlockBody { + return mergeBeaconBlockBody{b: w.b.Body} +} + +// IsNil checks if the beacon block is nil. +func (w mergeBeaconBlock) IsNil() bool { + return w.b == nil +} + +// HashTreeRoot returns the ssz root of the block. +func (w mergeBeaconBlock) HashTreeRoot() ([32]byte, error) { + return w.b.HashTreeRoot() +} + +// MarshalSSZ marshals the block into its respective +// ssz form. +func (w mergeBeaconBlock) MarshalSSZ() ([]byte, error) { + return w.b.MarshalSSZ() +} + +// MarshalSSZTo marshals the beacon block to its relevant ssz +// form to the provided byte buffer. +func (w mergeBeaconBlock) MarshalSSZTo(dst []byte) ([]byte, error) { + return w.b.MarshalSSZTo(dst) +} + +// SizeSSZ returns the size of serialized block. +func (w mergeBeaconBlock) SizeSSZ() int { + return w.b.SizeSSZ() +} + +// UnmarshalSSZ unmarshalls the beacon block from its relevant ssz +// form. +func (w mergeBeaconBlock) UnmarshalSSZ(buf []byte) error { + return w.b.UnmarshalSSZ(buf) +} + +// Proto returns the underlying block object in its +// proto form. +func (w mergeBeaconBlock) Proto() proto.Message { + return w.b +} + +// Version of the underlying protobuf object. 
+func (w mergeBeaconBlock) Version() int { + return version.Merge +} + +// mergeBeaconBlockBody is a wrapper of a beacon block body. +type mergeBeaconBlockBody struct { + b *eth.BeaconBlockBodyMerge +} + +// WrappedMergeBeaconBlockBody is constructor which wraps a protobuf merge object +// with the block wrapper. +func WrappedMergeBeaconBlockBody(b *eth.BeaconBlockBodyMerge) (block.BeaconBlockBody, error) { + w := mergeBeaconBlockBody{b: b} + if w.IsNil() { + return nil, ErrNilObjectWrapped + } + return w, nil +} + +// RandaoReveal returns the randao reveal from the block body. +func (w mergeBeaconBlockBody) RandaoReveal() []byte { + return w.b.RandaoReveal +} + +// Eth1Data returns the eth1 data in the block. +func (w mergeBeaconBlockBody) Eth1Data() *eth.Eth1Data { + return w.b.Eth1Data +} + +// Graffiti returns the graffiti in the block. +func (w mergeBeaconBlockBody) Graffiti() []byte { + return w.b.Graffiti +} + +// ProposerSlashings returns the proposer slashings in the block. +func (w mergeBeaconBlockBody) ProposerSlashings() []*eth.ProposerSlashing { + return w.b.ProposerSlashings +} + +// AttesterSlashings returns the attester slashings in the block. +func (w mergeBeaconBlockBody) AttesterSlashings() []*eth.AttesterSlashing { + return w.b.AttesterSlashings +} + +// Attestations returns the stored attestations in the block. +func (w mergeBeaconBlockBody) Attestations() []*eth.Attestation { + return w.b.Attestations +} + +// Deposits returns the stored deposits in the block. +func (w mergeBeaconBlockBody) Deposits() []*eth.Deposit { + return w.b.Deposits +} + +// VoluntaryExits returns the voluntary exits in the block. +func (w mergeBeaconBlockBody) VoluntaryExits() []*eth.SignedVoluntaryExit { + return w.b.VoluntaryExits +} + +// SyncAggregate returns the sync aggregate in the block. +func (w mergeBeaconBlockBody) SyncAggregate() (*eth.SyncAggregate, error) { + return w.b.SyncAggregate, nil +} + +// IsNil checks if the block body is nil. 
+func (w mergeBeaconBlockBody) IsNil() bool { + return w.b == nil +} + +// HashTreeRoot returns the ssz root of the block body. +func (w mergeBeaconBlockBody) HashTreeRoot() ([32]byte, error) { + return w.b.HashTreeRoot() +} + +// Proto returns the underlying proto form of the block +// body. +func (w mergeBeaconBlockBody) Proto() proto.Message { + return w.b +} + +// ExecutionPayload returns the Execution payload of the block body. +func (w mergeBeaconBlockBody) ExecutionPayload() (*eth.ExecutionPayload, error) { + return w.b.ExecutionPayload, nil +} diff --git a/proto/prysm/v1alpha1/wrapper/beacon_block_test.go b/proto/prysm/v1alpha1/wrapper/beacon_block_test.go index 710085ce2b..f54bf9faf3 100644 --- a/proto/prysm/v1alpha1/wrapper/beacon_block_test.go +++ b/proto/prysm/v1alpha1/wrapper/beacon_block_test.go @@ -336,15 +336,15 @@ func TestPhase0SignedBeaconBlock_Header(t *testing.T) { assert.DeepEqual(t, signature, header.Signature) } -func TestAltairSignedBeaconBlock_Header(t *testing.T) { +func TestMergeSignedBeaconBlock_Header(t *testing.T) { root := bytesutil.PadTo([]byte("root"), 32) signature := bytesutil.PadTo([]byte("sig"), 96) - body := ðpb.BeaconBlockBodyAltair{} - body = util.HydrateBeaconBlockBodyAltair(body) + body := ðpb.BeaconBlockBodyMerge{} + body = util.HydrateBeaconBlockBodyMerge(body) bodyRoot, err := body.HashTreeRoot() require.NoError(t, err) - block := ðpb.SignedBeaconBlockAltair{ - Block: ðpb.BeaconBlockAltair{ + block := ðpb.SignedBeaconBlockMerge{ + Block: ðpb.BeaconBlockMerge{ Slot: 1, ProposerIndex: 1, ParentRoot: root, @@ -353,7 +353,7 @@ func TestAltairSignedBeaconBlock_Header(t *testing.T) { }, Signature: signature, } - wrapped, err := wrapper.WrappedAltairSignedBeaconBlock(block) + wrapped, err := wrapper.WrappedMergeSignedBeaconBlock(block) require.NoError(t, err) header, err := wrapped.Header() @@ -365,3 +365,308 @@ func TestAltairSignedBeaconBlock_Header(t *testing.T) { assert.DeepEqual(t, root, header.Header.ParentRoot) 
assert.DeepEqual(t, signature, header.Signature) } + +func TestMergeSignedBeaconBlock_Signature(t *testing.T) { + sig := []byte{0x11, 0x22} + wsb, err := wrapper.WrappedMergeSignedBeaconBlock(ðpb.SignedBeaconBlockMerge{Block: ðpb.BeaconBlockMerge{}, Signature: sig}) + require.NoError(t, err) + + if !bytes.Equal(sig, wsb.Signature()) { + t.Error("Wrong signature returned") + } +} + +func TestMergeSignedBeaconBlock_Block(t *testing.T) { + blk := ðpb.BeaconBlockMerge{Slot: 54} + wsb, err := wrapper.WrappedMergeSignedBeaconBlock(ðpb.SignedBeaconBlockMerge{Block: blk}) + require.NoError(t, err) + + assert.DeepEqual(t, blk, wsb.Block().Proto()) +} + +func TestMergeSignedBeaconBlock_IsNil(t *testing.T) { + _, err := wrapper.WrappedMergeSignedBeaconBlock(nil) + require.Equal(t, wrapper.ErrNilObjectWrapped, err) + + wsb, err := wrapper.WrappedMergeSignedBeaconBlock(ðpb.SignedBeaconBlockMerge{Block: ðpb.BeaconBlockMerge{}}) + require.NoError(t, err) + + assert.Equal(t, false, wsb.IsNil()) +} + +func TestMergeSignedBeaconBlock_Copy(t *testing.T) { + t.Skip("TODO: Missing mutation evaluation helpers") +} + +func TestMergeSignedBeaconBlock_Proto(t *testing.T) { + sb := ðpb.SignedBeaconBlockMerge{ + Block: ðpb.BeaconBlockMerge{Slot: 66}, + Signature: []byte{0x11, 0x22}, + } + wsb, err := wrapper.WrappedMergeSignedBeaconBlock(sb) + require.NoError(t, err) + + assert.Equal(t, sb, wsb.Proto()) +} + +func TestMergeSignedBeaconBlock_PbPhase0Block(t *testing.T) { + wsb, err := wrapper.WrappedMergeSignedBeaconBlock(ðpb.SignedBeaconBlockMerge{Block: ðpb.BeaconBlockMerge{}}) + require.NoError(t, err) + + if _, err := wsb.PbPhase0Block(); err != wrapper.ErrUnsupportedPhase0Block { + t.Errorf("Wrong error returned. 
Want %v got %v", wrapper.ErrUnsupportedPhase0Block, err) + } +} + +func TestMergeSignedBeaconBlock_PbMergeBlock(t *testing.T) { + sb := ðpb.SignedBeaconBlockMerge{ + Block: ðpb.BeaconBlockMerge{Slot: 66}, + Signature: []byte{0x11, 0x22}, + } + wsb, err := wrapper.WrappedMergeSignedBeaconBlock(sb) + require.NoError(t, err) + + got, err := wsb.PbMergeBlock() + assert.NoError(t, err) + assert.Equal(t, sb, got) +} + +func TestMergeSignedBeaconBlock_MarshalSSZTo(t *testing.T) { + wsb, err := wrapper.WrappedMergeSignedBeaconBlock(util.HydrateSignedBeaconBlockMerge(ðpb.SignedBeaconBlockMerge{})) + assert.NoError(t, err) + + var b []byte + b, err = wsb.MarshalSSZTo(b) + assert.NoError(t, err) + assert.NotEqual(t, 0, len(b)) +} + +func TestMergeSignedBeaconBlock_SSZ(t *testing.T) { + wsb, err := wrapper.WrappedMergeSignedBeaconBlock(util.HydrateSignedBeaconBlockMerge(ðpb.SignedBeaconBlockMerge{})) + assert.NoError(t, err) + + b, err := wsb.MarshalSSZ() + assert.NoError(t, err) + assert.NotEqual(t, 0, len(b)) + + assert.NotEqual(t, 0, wsb.SizeSSZ()) + + assert.NoError(t, wsb.UnmarshalSSZ(b)) +} + +func TestMergeSignedBeaconBlock_Version(t *testing.T) { + wsb, err := wrapper.WrappedMergeSignedBeaconBlock(ðpb.SignedBeaconBlockMerge{Block: ðpb.BeaconBlockMerge{}}) + require.NoError(t, err) + + assert.Equal(t, version.Merge, wsb.Version()) +} + +func TestMergeBeaconBlock_Slot(t *testing.T) { + slot := types.Slot(546) + wb, err := wrapper.WrappedMergeBeaconBlock(ðpb.BeaconBlockMerge{Slot: slot}) + require.NoError(t, err) + + assert.Equal(t, slot, wb.Slot()) +} + +func TestMergeBeaconBlock_ProposerIndex(t *testing.T) { + pi := types.ValidatorIndex(555) + wb, err := wrapper.WrappedMergeBeaconBlock(ðpb.BeaconBlockMerge{ProposerIndex: pi}) + require.NoError(t, err) + + assert.Equal(t, pi, wb.ProposerIndex()) +} + +func TestMergeBeaconBlock_ParentRoot(t *testing.T) { + root := []byte{0xAA, 0xBF, 0x33, 0x01} + wb, err := wrapper.WrappedMergeBeaconBlock(ðpb.BeaconBlockMerge{ParentRoot: 
root}) + require.NoError(t, err) + + assert.DeepEqual(t, root, wb.ParentRoot()) +} + +func TestMergeBeaconBlock_StateRoot(t *testing.T) { + root := []byte{0xAA, 0xBF, 0x33, 0x01} + wb, err := wrapper.WrappedMergeBeaconBlock(ðpb.BeaconBlockMerge{StateRoot: root}) + require.NoError(t, err) + + assert.DeepEqual(t, root, wb.StateRoot()) +} + +func TestMergeBeaconBlock_Body(t *testing.T) { + body := ðpb.BeaconBlockBodyMerge{Graffiti: []byte{0x44}} + wb, err := wrapper.WrappedMergeBeaconBlock(ðpb.BeaconBlockMerge{Body: body}) + require.NoError(t, err) + + assert.Equal(t, body, wb.Body().Proto()) +} + +func TestMergeBeaconBlock_IsNil(t *testing.T) { + _, err := wrapper.WrappedMergeBeaconBlock(nil) + require.Equal(t, wrapper.ErrNilObjectWrapped, err) + + wb, err := wrapper.WrappedMergeBeaconBlock(ðpb.BeaconBlockMerge{}) + require.NoError(t, err) + + assert.Equal(t, false, wb.IsNil()) +} + +func TestMergeBeaconBlock_HashTreeRoot(t *testing.T) { + wb, err := wrapper.WrappedMergeBeaconBlock(util.HydrateBeaconBlockMerge(ðpb.BeaconBlockMerge{})) + require.NoError(t, err) + + rt, err := wb.HashTreeRoot() + assert.NoError(t, err) + assert.NotEmpty(t, rt) +} + +func TestMergeBeaconBlock_Proto(t *testing.T) { + blk := ðpb.BeaconBlockMerge{ProposerIndex: 234} + wb, err := wrapper.WrappedMergeBeaconBlock(blk) + require.NoError(t, err) + + assert.Equal(t, blk, wb.Proto()) +} + +func TestMergeBeaconBlock_SSZ(t *testing.T) { + wb, err := wrapper.WrappedMergeBeaconBlock(util.HydrateBeaconBlockMerge(ðpb.BeaconBlockMerge{})) + assert.NoError(t, err) + + b, err := wb.MarshalSSZ() + assert.NoError(t, err) + assert.NotEqual(t, 0, len(b)) + + assert.NotEqual(t, 0, wb.SizeSSZ()) + + assert.NoError(t, wb.UnmarshalSSZ(b)) +} + +func TestMergeBeaconBlock_Version(t *testing.T) { + wb, err := wrapper.WrappedMergeBeaconBlock(ðpb.BeaconBlockMerge{}) + require.NoError(t, err) + + assert.Equal(t, version.Merge, wb.Version()) +} + +func TestMergeBeaconBlockBody_RandaoReveal(t *testing.T) { + root := 
[]byte{0xAA, 0xBF, 0x33, 0x01} + wbb, err := wrapper.WrappedMergeBeaconBlockBody(ðpb.BeaconBlockBodyMerge{RandaoReveal: root}) + require.NoError(t, err) + + assert.DeepEqual(t, root, wbb.RandaoReveal()) +} + +func TestMergeBeaconBlockBody_Eth1Data(t *testing.T) { + data := &v1alpha1.Eth1Data{} + body := ðpb.BeaconBlockBodyMerge{ + Eth1Data: data, + } + wbb, err := wrapper.WrappedMergeBeaconBlockBody(body) + require.NoError(t, err) + assert.Equal(t, data, wbb.Eth1Data()) +} + +func TestMergeBeaconBlockBody_Graffiti(t *testing.T) { + graffiti := []byte{0x66, 0xAA} + body := ðpb.BeaconBlockBodyMerge{Graffiti: graffiti} + wbb, err := wrapper.WrappedMergeBeaconBlockBody(body) + require.NoError(t, err) + + assert.DeepEqual(t, graffiti, wbb.Graffiti()) +} + +func TestMergeBeaconBlockBody_ProposerSlashings(t *testing.T) { + ps := []*v1alpha1.ProposerSlashing{ + {Header_1: &v1alpha1.SignedBeaconBlockHeader{ + Signature: []byte{0x11, 0x20}, + }}, + } + body := ðpb.BeaconBlockBodyMerge{ProposerSlashings: ps} + wbb, err := wrapper.WrappedMergeBeaconBlockBody(body) + require.NoError(t, err) + + assert.DeepEqual(t, ps, wbb.ProposerSlashings()) +} + +func TestMergeBeaconBlockBody_AttesterSlashings(t *testing.T) { + as := []*v1alpha1.AttesterSlashing{ + {Attestation_1: &v1alpha1.IndexedAttestation{Signature: []byte{0x11}}}, + } + body := ðpb.BeaconBlockBodyMerge{AttesterSlashings: as} + wbb, err := wrapper.WrappedMergeBeaconBlockBody(body) + require.NoError(t, err) + + assert.DeepEqual(t, as, wbb.AttesterSlashings()) +} + +func TestMergeBeaconBlockBody_Attestations(t *testing.T) { + atts := []*v1alpha1.Attestation{{Signature: []byte{0x88}}} + + body := ðpb.BeaconBlockBodyMerge{Attestations: atts} + wbb, err := wrapper.WrappedMergeBeaconBlockBody(body) + require.NoError(t, err) + + assert.DeepEqual(t, atts, wbb.Attestations()) +} + +func TestMergeBeaconBlockBody_Deposits(t *testing.T) { + deposits := []*v1alpha1.Deposit{ + {Proof: [][]byte{{0x54, 0x10}}}, + } + body := 
ðpb.BeaconBlockBodyMerge{Deposits: deposits} + wbb, err := wrapper.WrappedMergeBeaconBlockBody(body) + require.NoError(t, err) + + assert.DeepEqual(t, deposits, wbb.Deposits()) +} + +func TestMergeBeaconBlockBody_VoluntaryExits(t *testing.T) { + exits := []*v1alpha1.SignedVoluntaryExit{ + {Exit: &v1alpha1.VoluntaryExit{Epoch: 54}}, + } + body := ðpb.BeaconBlockBodyMerge{VoluntaryExits: exits} + wbb, err := wrapper.WrappedMergeBeaconBlockBody(body) + require.NoError(t, err) + + assert.DeepEqual(t, exits, wbb.VoluntaryExits()) +} + +func TestMergeBeaconBlockBody_IsNil(t *testing.T) { + _, err := wrapper.WrappedMergeBeaconBlockBody(nil) + require.Equal(t, wrapper.ErrNilObjectWrapped, err) + + wbb, err := wrapper.WrappedMergeBeaconBlockBody(ðpb.BeaconBlockBodyMerge{}) + require.NoError(t, err) + assert.Equal(t, false, wbb.IsNil()) + +} + +func TestMergeBeaconBlockBody_HashTreeRoot(t *testing.T) { + wb, err := wrapper.WrappedMergeBeaconBlockBody(util.HydrateBeaconBlockBodyMerge(ðpb.BeaconBlockBodyMerge{})) + assert.NoError(t, err) + + rt, err := wb.HashTreeRoot() + assert.NoError(t, err) + assert.NotEmpty(t, rt) +} + +func TestMergeBeaconBlockBody_Proto(t *testing.T) { + body := ðpb.BeaconBlockBodyMerge{Graffiti: []byte{0x66, 0xAA}} + wbb, err := wrapper.WrappedMergeBeaconBlockBody(body) + require.NoError(t, err) + + assert.Equal(t, body, wbb.Proto()) +} + +func TestMergeBeaconBlockBody_ExecutionPayload(t *testing.T) { + payloads := &v1alpha1.ExecutionPayload{ + BlockNumber: 100, + } + body := ðpb.BeaconBlockBodyMerge{ExecutionPayload: payloads} + wbb, err := wrapper.WrappedMergeBeaconBlockBody(body) + require.NoError(t, err) + + got, err := wbb.ExecutionPayload() + require.NoError(t, err) + assert.DeepEqual(t, payloads, got) +} diff --git a/testing/util/block.go b/testing/util/block.go index 4720fe53bc..d82c53c018 100644 --- a/testing/util/block.go +++ b/testing/util/block.go @@ -612,3 +612,68 @@ func HydrateBeaconBlockBodyAltair(b *ethpb.BeaconBlockBodyAltair) 
*ethpb.BeaconB
 	}
 	return b
 }
+
+// HydrateSignedBeaconBlockMerge hydrates a signed beacon block with correct field length sizes
+// to comply with fssz marshalling and unmarshalling rules.
+func HydrateSignedBeaconBlockMerge(b *ethpb.SignedBeaconBlockMerge) *ethpb.SignedBeaconBlockMerge {
+	if b.Signature == nil {
+		b.Signature = make([]byte, params.BeaconConfig().BLSSignatureLength)
+	}
+	b.Block = HydrateBeaconBlockMerge(b.Block)
+	return b
+}
+
+// HydrateBeaconBlockMerge hydrates a beacon block with correct field length sizes
+// to comply with fssz marshalling and unmarshalling rules.
+func HydrateBeaconBlockMerge(b *ethpb.BeaconBlockMerge) *ethpb.BeaconBlockMerge {
+	if b == nil {
+		b = &ethpb.BeaconBlockMerge{}
+	}
+	if b.ParentRoot == nil {
+		b.ParentRoot = make([]byte, 32)
+	}
+	if b.StateRoot == nil {
+		b.StateRoot = make([]byte, 32)
+	}
+	b.Body = HydrateBeaconBlockBodyMerge(b.Body)
+	return b
+}
+
+// HydrateBeaconBlockBodyMerge hydrates a beacon block body with correct field length sizes
+// to comply with fssz marshalling and unmarshalling rules.
+func HydrateBeaconBlockBodyMerge(b *ethpb.BeaconBlockBodyMerge) *ethpb.BeaconBlockBodyMerge {
+	if b == nil {
+		b = &ethpb.BeaconBlockBodyMerge{}
+	}
+	if b.RandaoReveal == nil {
+		b.RandaoReveal = make([]byte, params.BeaconConfig().BLSSignatureLength)
+	}
+	if b.Graffiti == nil {
+		b.Graffiti = make([]byte, 32)
+	}
+	if b.Eth1Data == nil {
+		b.Eth1Data = &ethpb.Eth1Data{
+			DepositRoot: make([]byte, 32),
+			BlockHash:   make([]byte, 32),
+		}
+	}
+	if b.SyncAggregate == nil {
+		b.SyncAggregate = &ethpb.SyncAggregate{
+			SyncCommitteeBits:      make([]byte, 64),
+			SyncCommitteeSignature: make([]byte, 96),
+		}
+	}
+	if b.ExecutionPayload == nil {
+		b.ExecutionPayload = &ethpb.ExecutionPayload{
+			ParentHash:    make([]byte, 32),
+			FeeRecipient:  make([]byte, 20),
+			StateRoot:     make([]byte, 32),
+			ReceiptRoot:   make([]byte, 32),
+			LogsBloom:     make([]byte, 256),
+			Random:        make([]byte, 32),
+			BaseFeePerGas: make([]byte, 32),
+			BlockHash:     make([]byte, 32),
+		}
+	}
+	return b
+}

From f42227aa0499c6f300d505f6fe1bf02cb201f5fd Mon Sep 17 00:00:00 2001
From: terence tsao
Date: Thu, 25 Nov 2021 12:41:05 -0800
Subject: [PATCH 25/45] Rest of the merge state implementation (#9939)

* Add rest of the state implementations

* Update BUILD.bazel

* Update state_trie_test.go

* fix test

* fix test

* Update beacon-chain/state/v3/state_trie.go

Co-authored-by: Potuz

* Update beacon-chain/state/v3/state_trie.go

Co-authored-by: Potuz

* add ctx

* go fmt

Co-authored-by: Potuz
---
 beacon-chain/state/v3/BUILD.bazel        |  10 +
 beacon-chain/state/v3/field_roots.go     |  21 +-
 beacon-chain/state/v3/getters_state.go   |  62 ++---
 beacon-chain/state/v3/getters_test.go    | 104 ++++++++
 beacon-chain/state/v3/setters_test.go    | 185 +++++++++++++
 beacon-chain/state/v3/state_trie.go      | 326 ++++++++++++++++++++++-
 beacon-chain/state/v3/state_trie_test.go | 167 ++++++++++++
 7 files changed, 835 insertions(+), 40 deletions(-)
 create mode 100644 beacon-chain/state/v3/setters_test.go
 create mode 100644 beacon-chain/state/v3/state_trie_test.go

diff --git 
a/beacon-chain/state/v3/BUILD.bazel b/beacon-chain/state/v3/BUILD.bazel index 32320e1809..94784f1858 100644 --- a/beacon-chain/state/v3/BUILD.bazel +++ b/beacon-chain/state/v3/BUILD.bazel @@ -42,6 +42,7 @@ go_library( "//beacon-chain/state/v1:go_default_library", "//config/features:go_default_library", "//config/params:go_default_library", + "//container/slice:go_default_library", "//crypto/hash:go_default_library", "//encoding/bytesutil:go_default_library", "//encoding/ssz:go_default_library", @@ -53,6 +54,7 @@ go_library( "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library", "@com_github_prysmaticlabs_eth2_types//:go_default_library", "@com_github_prysmaticlabs_go_bitfield//:go_default_library", + "@io_opencensus_go//trace:go_default_library", "@org_golang_google_protobuf//proto:go_default_library", ], ) @@ -66,13 +68,21 @@ go_test( "getters_block_test.go", "getters_test.go", "getters_validator_test.go", + "setters_test.go", + "state_trie_test.go", ], embed = [":go_default_library"], deps = [ + "//beacon-chain/state/stateutil:go_default_library", + "//beacon-chain/state/types:go_default_library", "//beacon-chain/state/v1:go_default_library", + "//config/features:go_default_library", + "//config/params:go_default_library", "//encoding/bytesutil:go_default_library", "//proto/prysm/v1alpha1:go_default_library", "//testing/assert:go_default_library", "//testing/require:go_default_library", + "@com_github_prysmaticlabs_eth2_types//:go_default_library", + "@com_github_prysmaticlabs_go_bitfield//:go_default_library", ], ) diff --git a/beacon-chain/state/v3/field_roots.go b/beacon-chain/state/v3/field_roots.go index 827dcb3f2a..5accf3ba3d 100644 --- a/beacon-chain/state/v3/field_roots.go +++ b/beacon-chain/state/v3/field_roots.go @@ -1,6 +1,7 @@ package v3 import ( + "context" "encoding/binary" "sync" @@ -13,6 +14,7 @@ import ( "github.com/prysmaticlabs/prysm/encoding/bytesutil" "github.com/prysmaticlabs/prysm/encoding/ssz" ethpb 
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" + "go.opencensus.io/trace" ) var ( @@ -47,15 +49,17 @@ type stateRootHasher struct { // computeFieldRoots returns the hash tree root computations of every field in // the beacon state as a list of 32 byte roots. -//nolint:deadcode -func computeFieldRoots(state *ethpb.BeaconStateMerge) ([][]byte, error) { +func computeFieldRoots(ctx context.Context, state *ethpb.BeaconStateMerge) ([][]byte, error) { if features.Get().EnableSSZCache { - return cachedHasher.computeFieldRootsWithHasher(state) + return cachedHasher.computeFieldRootsWithHasher(ctx, state) } - return nocachedHasher.computeFieldRootsWithHasher(state) + return nocachedHasher.computeFieldRootsWithHasher(ctx, state) } -func (h *stateRootHasher) computeFieldRootsWithHasher(state *ethpb.BeaconStateMerge) ([][]byte, error) { +func (h *stateRootHasher) computeFieldRootsWithHasher(ctx context.Context, state *ethpb.BeaconStateMerge) ([][]byte, error) { + ctx, span := trace.StartSpan(ctx, "beaconState.computeFieldRootsWithHasher") + defer span.End() + if state == nil { return nil, errors.New("nil state") } @@ -219,8 +223,11 @@ func (h *stateRootHasher) computeFieldRootsWithHasher(state *ethpb.BeaconStateMe fieldRoots[23] = nextSyncCommitteeRoot[:] // Execution payload root. 
- //TODO: Blocked by https://github.com/ferranbt/fastssz/pull/65 - fieldRoots[24] = []byte{} + executionPayloadRoot, err := state.LatestExecutionPayloadHeader.HashTreeRoot() + if err != nil { + return nil, err + } + fieldRoots[24] = executionPayloadRoot[:] return fieldRoots, nil } diff --git a/beacon-chain/state/v3/getters_state.go b/beacon-chain/state/v3/getters_state.go index 9bd446c0b9..71604daae4 100644 --- a/beacon-chain/state/v3/getters_state.go +++ b/beacon-chain/state/v3/getters_state.go @@ -23,31 +23,32 @@ func (b *BeaconState) CloneInnerState() interface{} { b.lock.RLock() defer b.lock.RUnlock() - return ðpb.BeaconStateAltair{ - GenesisTime: b.genesisTime(), - GenesisValidatorsRoot: b.genesisValidatorRoot(), - Slot: b.slot(), - Fork: b.fork(), - LatestBlockHeader: b.latestBlockHeader(), - BlockRoots: b.blockRoots(), - StateRoots: b.stateRoots(), - HistoricalRoots: b.historicalRoots(), - Eth1Data: b.eth1Data(), - Eth1DataVotes: b.eth1DataVotes(), - Eth1DepositIndex: b.eth1DepositIndex(), - Validators: b.validators(), - Balances: b.balances(), - RandaoMixes: b.randaoMixes(), - Slashings: b.slashings(), - CurrentEpochParticipation: b.currentEpochParticipation(), - PreviousEpochParticipation: b.previousEpochParticipation(), - JustificationBits: b.justificationBits(), - PreviousJustifiedCheckpoint: b.previousJustifiedCheckpoint(), - CurrentJustifiedCheckpoint: b.currentJustifiedCheckpoint(), - FinalizedCheckpoint: b.finalizedCheckpoint(), - InactivityScores: b.inactivityScores(), - CurrentSyncCommittee: b.currentSyncCommittee(), - NextSyncCommittee: b.nextSyncCommittee(), + return ðpb.BeaconStateMerge{ + GenesisTime: b.genesisTime(), + GenesisValidatorsRoot: b.genesisValidatorRoot(), + Slot: b.slot(), + Fork: b.fork(), + LatestBlockHeader: b.latestBlockHeader(), + BlockRoots: b.blockRoots(), + StateRoots: b.stateRoots(), + HistoricalRoots: b.historicalRoots(), + Eth1Data: b.eth1Data(), + Eth1DataVotes: b.eth1DataVotes(), + Eth1DepositIndex: 
b.eth1DepositIndex(), + Validators: b.validators(), + Balances: b.balances(), + RandaoMixes: b.randaoMixes(), + Slashings: b.slashings(), + CurrentEpochParticipation: b.currentEpochParticipation(), + PreviousEpochParticipation: b.previousEpochParticipation(), + JustificationBits: b.justificationBits(), + PreviousJustifiedCheckpoint: b.previousJustifiedCheckpoint(), + CurrentJustifiedCheckpoint: b.currentJustifiedCheckpoint(), + FinalizedCheckpoint: b.finalizedCheckpoint(), + InactivityScores: b.inactivityScores(), + CurrentSyncCommittee: b.currentSyncCommittee(), + NextSyncCommittee: b.nextSyncCommittee(), + LatestExecutionPayloadHeader: b.latestExecutionPayloadHeader(), } } @@ -112,16 +113,15 @@ func (b *BeaconState) MarshalSSZ() ([]byte, error) { if !b.hasInnerState() { return nil, errors.New("nil beacon state") } - //TODO: Blocked by https://github.com/ferranbt/fastssz/pull/65 - return []byte{}, nil + return b.state.MarshalSSZ() } -// ProtobufBeaconState transforms an input into beacon state hard fork 1 in the form of protobuf. +// ProtobufBeaconState transforms an input into beacon state Merge in the form of protobuf. // Error is returned if the input is not type protobuf beacon state. 
-func ProtobufBeaconState(s interface{}) (*ethpb.BeaconStateAltair, error) { - pbState, ok := s.(*ethpb.BeaconStateAltair) +func ProtobufBeaconState(s interface{}) (*ethpb.BeaconStateMerge, error) { + pbState, ok := s.(*ethpb.BeaconStateMerge) if !ok { - return nil, errors.New("input is not type pb.BeaconStateAltair") + return nil, errors.New("input is not type pb.BeaconStateMerge") } return pbState, nil } diff --git a/beacon-chain/state/v3/getters_test.go b/beacon-chain/state/v3/getters_test.go index 6cf112dbcb..c96bb51b27 100644 --- a/beacon-chain/state/v3/getters_test.go +++ b/beacon-chain/state/v3/getters_test.go @@ -5,7 +5,9 @@ import ( "sync" "testing" + types "github.com/prysmaticlabs/eth2-types" ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" + "github.com/prysmaticlabs/prysm/testing/assert" "github.com/prysmaticlabs/prysm/testing/require" ) @@ -86,3 +88,105 @@ func TestNilState_NoPanic(t *testing.T) { _, err = st.NextSyncCommittee() _ = err } + +func TestBeaconState_ValidatorByPubkey(t *testing.T) { + keyCreator := func(input []byte) [48]byte { + nKey := [48]byte{} + copy(nKey[:1], input) + return nKey + } + + tests := []struct { + name string + modifyFunc func(b *BeaconState, k [48]byte) + exists bool + expectedIdx types.ValidatorIndex + largestIdxInSet types.ValidatorIndex + }{ + { + name: "retrieve validator", + modifyFunc: func(b *BeaconState, key [48]byte) { + assert.NoError(t, b.AppendValidator(ðpb.Validator{PublicKey: key[:]})) + }, + exists: true, + expectedIdx: 0, + }, + { + name: "retrieve validator with multiple validators from the start", + modifyFunc: func(b *BeaconState, key [48]byte) { + key1 := keyCreator([]byte{'C'}) + key2 := keyCreator([]byte{'D'}) + assert.NoError(t, b.AppendValidator(ðpb.Validator{PublicKey: key[:]})) + assert.NoError(t, b.AppendValidator(ðpb.Validator{PublicKey: key1[:]})) + assert.NoError(t, b.AppendValidator(ðpb.Validator{PublicKey: key2[:]})) + }, + exists: true, + expectedIdx: 0, + }, + { + name: 
"retrieve validator with multiple validators", + modifyFunc: func(b *BeaconState, key [48]byte) { + key1 := keyCreator([]byte{'C'}) + key2 := keyCreator([]byte{'D'}) + assert.NoError(t, b.AppendValidator(ðpb.Validator{PublicKey: key1[:]})) + assert.NoError(t, b.AppendValidator(ðpb.Validator{PublicKey: key2[:]})) + assert.NoError(t, b.AppendValidator(ðpb.Validator{PublicKey: key[:]})) + }, + exists: true, + expectedIdx: 2, + }, + { + name: "retrieve validator with multiple validators from the start with shared state", + modifyFunc: func(b *BeaconState, key [48]byte) { + key1 := keyCreator([]byte{'C'}) + key2 := keyCreator([]byte{'D'}) + assert.NoError(t, b.AppendValidator(ðpb.Validator{PublicKey: key[:]})) + _ = b.Copy() + assert.NoError(t, b.AppendValidator(ðpb.Validator{PublicKey: key1[:]})) + assert.NoError(t, b.AppendValidator(ðpb.Validator{PublicKey: key2[:]})) + }, + exists: true, + expectedIdx: 0, + }, + { + name: "retrieve validator with multiple validators with shared state", + modifyFunc: func(b *BeaconState, key [48]byte) { + key1 := keyCreator([]byte{'C'}) + key2 := keyCreator([]byte{'D'}) + assert.NoError(t, b.AppendValidator(ðpb.Validator{PublicKey: key1[:]})) + assert.NoError(t, b.AppendValidator(ðpb.Validator{PublicKey: key2[:]})) + n := b.Copy() + // Append to another state + assert.NoError(t, n.AppendValidator(ðpb.Validator{PublicKey: key[:]})) + + }, + exists: false, + expectedIdx: 0, + }, + { + name: "retrieve validator with multiple validators with shared state at boundary", + modifyFunc: func(b *BeaconState, key [48]byte) { + key1 := keyCreator([]byte{'C'}) + assert.NoError(t, b.AppendValidator(ðpb.Validator{PublicKey: key1[:]})) + n := b.Copy() + // Append to another state + assert.NoError(t, n.AppendValidator(ðpb.Validator{PublicKey: key[:]})) + + }, + exists: false, + expectedIdx: 0, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + s, err := InitializeFromProto(ðpb.BeaconStateMerge{}) + require.NoError(t, err) 
+ nKey := keyCreator([]byte{'A'}) + tt.modifyFunc(s, nKey) + idx, ok := s.ValidatorIndexByPubkey(nKey) + assert.Equal(t, tt.exists, ok) + assert.Equal(t, tt.expectedIdx, idx) + }) + } +} diff --git a/beacon-chain/state/v3/setters_test.go b/beacon-chain/state/v3/setters_test.go new file mode 100644 index 0000000000..c5d9f6d40f --- /dev/null +++ b/beacon-chain/state/v3/setters_test.go @@ -0,0 +1,185 @@ +package v3 + +import ( + "context" + "strconv" + "testing" + + types "github.com/prysmaticlabs/eth2-types" + "github.com/prysmaticlabs/go-bitfield" + "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil" + stateTypes "github.com/prysmaticlabs/prysm/beacon-chain/state/types" + "github.com/prysmaticlabs/prysm/config/params" + "github.com/prysmaticlabs/prysm/encoding/bytesutil" + eth "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" + ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" + "github.com/prysmaticlabs/prysm/testing/assert" + "github.com/prysmaticlabs/prysm/testing/require" +) + +func TestAppendBeyondIndicesLimit(t *testing.T) { + zeroHash := params.BeaconConfig().ZeroHash + mockblockRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot) + for i := 0; i < len(mockblockRoots); i++ { + mockblockRoots[i] = zeroHash[:] + } + + mockstateRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot) + for i := 0; i < len(mockstateRoots); i++ { + mockstateRoots[i] = zeroHash[:] + } + mockrandaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector) + for i := 0; i < len(mockrandaoMixes); i++ { + mockrandaoMixes[i] = zeroHash[:] + } + payload := ðpb.ExecutionPayloadHeader{ + ParentHash: make([]byte, 32), + FeeRecipient: make([]byte, 20), + StateRoot: make([]byte, 32), + ReceiptRoot: make([]byte, 32), + LogsBloom: make([]byte, 256), + Random: make([]byte, 32), + BaseFeePerGas: make([]byte, 32), + BlockHash: make([]byte, 32), + TransactionsRoot: make([]byte, 32), + } + st, err := 
InitializeFromProto(ðpb.BeaconStateMerge{ + Slot: 1, + CurrentEpochParticipation: []byte{}, + PreviousEpochParticipation: []byte{}, + Validators: []*eth.Validator{}, + Eth1Data: ð.Eth1Data{}, + BlockRoots: mockblockRoots, + StateRoots: mockstateRoots, + RandaoMixes: mockrandaoMixes, + LatestExecutionPayloadHeader: payload, + }) + require.NoError(t, err) + _, err = st.HashTreeRoot(context.Background()) + require.NoError(t, err) + for i := stateTypes.FieldIndex(0); i < stateTypes.FieldIndex(params.BeaconConfig().BeaconStateMergeFieldCount); i++ { + st.dirtyFields[i] = true + } + _, err = st.HashTreeRoot(context.Background()) + require.NoError(t, err) + for i := 0; i < 10; i++ { + assert.NoError(t, st.AppendValidator(ð.Validator{})) + } + assert.Equal(t, false, st.rebuildTrie[validators]) + assert.NotEqual(t, len(st.dirtyIndices[validators]), 0) + + for i := 0; i < indicesLimit; i++ { + assert.NoError(t, st.AppendValidator(ð.Validator{})) + } + assert.Equal(t, true, st.rebuildTrie[validators]) + assert.Equal(t, len(st.dirtyIndices[validators]), 0) +} + +func TestBeaconState_AppendBalanceWithTrie(t *testing.T) { + count := uint64(100) + vals := make([]*ethpb.Validator, 0, count) + bals := make([]uint64, 0, count) + for i := uint64(1); i < count; i++ { + someRoot := [32]byte{} + someKey := [48]byte{} + copy(someRoot[:], strconv.Itoa(int(i))) + copy(someKey[:], strconv.Itoa(int(i))) + vals = append(vals, ðpb.Validator{ + PublicKey: someKey[:], + WithdrawalCredentials: someRoot[:], + EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance, + Slashed: false, + ActivationEligibilityEpoch: 1, + ActivationEpoch: 1, + ExitEpoch: 1, + WithdrawableEpoch: 1, + }) + bals = append(bals, params.BeaconConfig().MaxEffectiveBalance) + } + zeroHash := params.BeaconConfig().ZeroHash + mockblockRoots := make([][]byte, params.BeaconConfig().SlotsPerHistoricalRoot) + for i := 0; i < len(mockblockRoots); i++ { + mockblockRoots[i] = zeroHash[:] + } + + mockstateRoots := make([][]byte, 
params.BeaconConfig().SlotsPerHistoricalRoot) + for i := 0; i < len(mockstateRoots); i++ { + mockstateRoots[i] = zeroHash[:] + } + mockrandaoMixes := make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector) + for i := 0; i < len(mockrandaoMixes); i++ { + mockrandaoMixes[i] = zeroHash[:] + } + var pubKeys [][]byte + for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSize; i++ { + pubKeys = append(pubKeys, bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength)) + } + payload := ðpb.ExecutionPayloadHeader{ + ParentHash: make([]byte, 32), + FeeRecipient: make([]byte, 20), + StateRoot: make([]byte, 32), + ReceiptRoot: make([]byte, 32), + LogsBloom: make([]byte, 256), + Random: make([]byte, 32), + BaseFeePerGas: make([]byte, 32), + BlockHash: make([]byte, 32), + TransactionsRoot: make([]byte, 32), + } + st, err := InitializeFromProto(ðpb.BeaconStateMerge{ + Slot: 1, + GenesisValidatorsRoot: make([]byte, 32), + Fork: ðpb.Fork{ + PreviousVersion: make([]byte, 4), + CurrentVersion: make([]byte, 4), + Epoch: 0, + }, + LatestBlockHeader: ðpb.BeaconBlockHeader{ + ParentRoot: make([]byte, 32), + StateRoot: make([]byte, 32), + BodyRoot: make([]byte, 32), + }, + CurrentEpochParticipation: []byte{}, + PreviousEpochParticipation: []byte{}, + Validators: vals, + Balances: bals, + Eth1Data: ð.Eth1Data{ + DepositRoot: make([]byte, 32), + BlockHash: make([]byte, 32), + }, + BlockRoots: mockblockRoots, + StateRoots: mockstateRoots, + RandaoMixes: mockrandaoMixes, + JustificationBits: bitfield.NewBitvector4(), + PreviousJustifiedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)}, + CurrentJustifiedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)}, + FinalizedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)}, + Slashings: make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector), + CurrentSyncCommittee: ðpb.SyncCommittee{ + Pubkeys: pubKeys, + AggregatePubkey: make([]byte, 48), + }, + NextSyncCommittee: ðpb.SyncCommittee{ + Pubkeys: pubKeys, + 
AggregatePubkey: make([]byte, 48), + }, + LatestExecutionPayloadHeader: payload, + }) + assert.NoError(t, err) + _, err = st.HashTreeRoot(context.Background()) + assert.NoError(t, err) + + for i := 0; i < 100; i++ { + if i%2 == 0 { + assert.NoError(t, st.UpdateBalancesAtIndex(types.ValidatorIndex(i), 1000)) + } + if i%3 == 0 { + assert.NoError(t, st.AppendBalance(1000)) + } + } + _, err = st.HashTreeRoot(context.Background()) + assert.NoError(t, err) + newRt := bytesutil.ToBytes32(st.merkleLayers[0][balances]) + wantedRt, err := stateutil.Uint64ListRootWithRegistryLimit(st.state.Balances) + assert.NoError(t, err) + assert.Equal(t, wantedRt, newRt, "state roots are unequal") +} diff --git a/beacon-chain/state/v3/state_trie.go b/beacon-chain/state/v3/state_trie.go index 227c7456c5..813d27c0c2 100644 --- a/beacon-chain/state/v3/state_trie.go +++ b/beacon-chain/state/v3/state_trie.go @@ -1,14 +1,24 @@ package v3 import ( + "context" + "runtime" + "sort" + "github.com/pkg/errors" "github.com/prometheus/client_golang/prometheus" "github.com/prometheus/client_golang/prometheus/promauto" + "github.com/prysmaticlabs/prysm/beacon-chain/state" "github.com/prysmaticlabs/prysm/beacon-chain/state/fieldtrie" "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil" "github.com/prysmaticlabs/prysm/beacon-chain/state/types" "github.com/prysmaticlabs/prysm/config/params" + "github.com/prysmaticlabs/prysm/container/slice" + "github.com/prysmaticlabs/prysm/crypto/hash" + "github.com/prysmaticlabs/prysm/encoding/bytesutil" + "github.com/prysmaticlabs/prysm/encoding/ssz" ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" + "go.opencensus.io/trace" "google.golang.org/protobuf/proto" ) @@ -31,7 +41,7 @@ func InitializeFromProtoUnsafe(st *ethpb.BeaconStateMerge) (*BeaconState, error) return nil, errors.New("received nil state") } - fieldCount := params.BeaconConfig().BeaconStateAltairFieldCount + fieldCount := params.BeaconConfig().BeaconStateMergeFieldCount b := &BeaconState{ 
state: st, dirtyFields: make(map[types.FieldIndex]bool, fieldCount), @@ -65,7 +75,319 @@ func InitializeFromProtoUnsafe(st *ethpb.BeaconStateMerge) (*BeaconState, error) b.sharedFieldReferences[balances] = stateutil.NewRef(1) b.sharedFieldReferences[inactivityScores] = stateutil.NewRef(1) // New in Altair. b.sharedFieldReferences[historicalRoots] = stateutil.NewRef(1) - + b.sharedFieldReferences[latestExecutionPayloadHeader] = stateutil.NewRef(1) // New in Merge. stateCount.Inc() return b, nil } + +// Copy returns a deep copy of the beacon state. +func (b *BeaconState) Copy() state.BeaconState { + if !b.hasInnerState() { + return nil + } + + b.lock.RLock() + defer b.lock.RUnlock() + fieldCount := params.BeaconConfig().BeaconStateMergeFieldCount + + dst := &BeaconState{ + state: ðpb.BeaconStateMerge{ + // Primitive types, safe to copy. + GenesisTime: b.state.GenesisTime, + Slot: b.state.Slot, + Eth1DepositIndex: b.state.Eth1DepositIndex, + + // Large arrays, infrequently changed, constant size. + RandaoMixes: b.state.RandaoMixes, + StateRoots: b.state.StateRoots, + BlockRoots: b.state.BlockRoots, + Slashings: b.state.Slashings, + Eth1DataVotes: b.state.Eth1DataVotes, + + // Large arrays, increases over time. + Validators: b.state.Validators, + Balances: b.state.Balances, + HistoricalRoots: b.state.HistoricalRoots, + PreviousEpochParticipation: b.state.PreviousEpochParticipation, + CurrentEpochParticipation: b.state.CurrentEpochParticipation, + InactivityScores: b.state.InactivityScores, + + // Everything else, too small to be concerned about, constant size. 
+ Fork: b.fork(), + LatestBlockHeader: b.latestBlockHeader(), + Eth1Data: b.eth1Data(), + JustificationBits: b.justificationBits(), + PreviousJustifiedCheckpoint: b.previousJustifiedCheckpoint(), + CurrentJustifiedCheckpoint: b.currentJustifiedCheckpoint(), + FinalizedCheckpoint: b.finalizedCheckpoint(), + GenesisValidatorsRoot: b.genesisValidatorRoot(), + CurrentSyncCommittee: b.currentSyncCommittee(), + NextSyncCommittee: b.nextSyncCommittee(), + LatestExecutionPayloadHeader: b.latestExecutionPayloadHeader(), + }, + dirtyFields: make(map[types.FieldIndex]bool, fieldCount), + dirtyIndices: make(map[types.FieldIndex][]uint64, fieldCount), + rebuildTrie: make(map[types.FieldIndex]bool, fieldCount), + sharedFieldReferences: make(map[types.FieldIndex]*stateutil.Reference, 11), + stateFieldLeaves: make(map[types.FieldIndex]*fieldtrie.FieldTrie, fieldCount), + + // Copy on write validator index map. + valMapHandler: b.valMapHandler, + } + + for field, ref := range b.sharedFieldReferences { + ref.AddRef() + dst.sharedFieldReferences[field] = ref + } + + // Increment ref for validator map + b.valMapHandler.AddRef() + + for i := range b.dirtyFields { + dst.dirtyFields[i] = true + } + + for i := range b.dirtyIndices { + indices := make([]uint64, len(b.dirtyIndices[i])) + copy(indices, b.dirtyIndices[i]) + dst.dirtyIndices[i] = indices + } + + for i := range b.rebuildTrie { + dst.rebuildTrie[i] = true + } + + for fldIdx, fieldTrie := range b.stateFieldLeaves { + dst.stateFieldLeaves[fldIdx] = fieldTrie + if fieldTrie.FieldReference() != nil { + fieldTrie.Lock() + fieldTrie.FieldReference().AddRef() + fieldTrie.Unlock() + } + } + + if b.merkleLayers != nil { + dst.merkleLayers = make([][][]byte, len(b.merkleLayers)) + for i, layer := range b.merkleLayers { + dst.merkleLayers[i] = make([][]byte, len(layer)) + for j, content := range layer { + dst.merkleLayers[i][j] = make([]byte, len(content)) + copy(dst.merkleLayers[i][j], content) + } + } + } + stateCount.Inc() + // 
Finalizer runs when dst is being destroyed in garbage collection. + runtime.SetFinalizer(dst, func(b *BeaconState) { + for field, v := range b.sharedFieldReferences { + v.MinusRef() + if b.stateFieldLeaves[field].FieldReference() != nil { + b.stateFieldLeaves[field].FieldReference().MinusRef() + } + } + for i := 0; i < fieldCount; i++ { + field := types.FieldIndex(i) + delete(b.stateFieldLeaves, field) + delete(b.dirtyIndices, field) + delete(b.dirtyFields, field) + delete(b.sharedFieldReferences, field) + delete(b.stateFieldLeaves, field) + } + stateCount.Sub(1) + }) + + return dst +} + +// HashTreeRoot of the beacon state retrieves the Merkle root of the trie +// representation of the beacon state based on the eth2 Simple Serialize specification. +func (b *BeaconState) HashTreeRoot(ctx context.Context) ([32]byte, error) { + _, span := trace.StartSpan(ctx, "BeaconStateMerge.HashTreeRoot") + defer span.End() + + b.lock.Lock() + defer b.lock.Unlock() + + if b.merkleLayers == nil || len(b.merkleLayers) == 0 { + fieldRoots, err := computeFieldRoots(ctx, b.state) + if err != nil { + return [32]byte{}, err + } + layers := stateutil.Merkleize(fieldRoots) + b.merkleLayers = layers + b.dirtyFields = make(map[types.FieldIndex]bool, params.BeaconConfig().BeaconStateMergeFieldCount) + } + + for field := range b.dirtyFields { + root, err := b.rootSelector(ctx, field) + if err != nil { + return [32]byte{}, err + } + b.merkleLayers[0][field] = root[:] + b.recomputeRoot(int(field)) + delete(b.dirtyFields, field) + } + return bytesutil.ToBytes32(b.merkleLayers[len(b.merkleLayers)-1][0]), nil +} + +// FieldReferencesCount returns the reference count held by each field. This +// also includes the field trie held by each field. 
+func (b *BeaconState) FieldReferencesCount() map[string]uint64 { + refMap := make(map[string]uint64) + b.lock.RLock() + defer b.lock.RUnlock() + for i, f := range b.sharedFieldReferences { + refMap[i.String(b.Version())] = uint64(f.Refs()) + } + for i, f := range b.stateFieldLeaves { + numOfRefs := uint64(f.FieldReference().Refs()) + f.RLock() + if !f.Empty() { + refMap[i.String(b.Version())+"_trie"] = numOfRefs + } + f.RUnlock() + } + return refMap +} + +// IsNil checks if the state and the underlying proto +// object are nil. +func (b *BeaconState) IsNil() bool { + return b == nil || b.state == nil +} + +func (b *BeaconState) rootSelector(ctx context.Context, field types.FieldIndex) ([32]byte, error) { + hasher := hash.CustomSHA256Hasher() + switch field { + case genesisTime: + return ssz.Uint64Root(b.state.GenesisTime), nil + case genesisValidatorRoot: + return bytesutil.ToBytes32(b.state.GenesisValidatorsRoot), nil + case slot: + return ssz.Uint64Root(uint64(b.state.Slot)), nil + case eth1DepositIndex: + return ssz.Uint64Root(b.state.Eth1DepositIndex), nil + case fork: + return ssz.ForkRoot(b.state.Fork) + case latestBlockHeader: + return stateutil.BlockHeaderRoot(b.state.LatestBlockHeader) + case blockRoots: + if b.rebuildTrie[field] { + err := b.resetFieldTrie(field, b.state.BlockRoots, uint64(params.BeaconConfig().SlotsPerHistoricalRoot)) + if err != nil { + return [32]byte{}, err + } + b.dirtyIndices[field] = []uint64{} + delete(b.rebuildTrie, field) + return b.stateFieldLeaves[field].TrieRoot() + } + return b.recomputeFieldTrie(blockRoots, b.state.BlockRoots) + case stateRoots: + if b.rebuildTrie[field] { + err := b.resetFieldTrie(field, b.state.StateRoots, uint64(params.BeaconConfig().SlotsPerHistoricalRoot)) + if err != nil { + return [32]byte{}, err + } + b.dirtyIndices[field] = []uint64{} + delete(b.rebuildTrie, field) + return b.stateFieldLeaves[field].TrieRoot() + } + return b.recomputeFieldTrie(stateRoots, b.state.StateRoots) + case 
historicalRoots: + return ssz.ByteArrayRootWithLimit(b.state.HistoricalRoots, params.BeaconConfig().HistoricalRootsLimit) + case eth1Data: + return eth1Root(hasher, b.state.Eth1Data) + case eth1DataVotes: + if b.rebuildTrie[field] { + err := b.resetFieldTrie(field, b.state.Eth1DataVotes, uint64(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().EpochsPerEth1VotingPeriod)))) + if err != nil { + return [32]byte{}, err + } + b.dirtyIndices[field] = []uint64{} + delete(b.rebuildTrie, field) + return b.stateFieldLeaves[field].TrieRoot() + } + return b.recomputeFieldTrie(field, b.state.Eth1DataVotes) + case validators: + if b.rebuildTrie[field] { + err := b.resetFieldTrie(field, b.state.Validators, params.BeaconConfig().ValidatorRegistryLimit) + if err != nil { + return [32]byte{}, err + } + b.dirtyIndices[validators] = []uint64{} + delete(b.rebuildTrie, validators) + return b.stateFieldLeaves[field].TrieRoot() + } + return b.recomputeFieldTrie(validators, b.state.Validators) + case balances: + return stateutil.Uint64ListRootWithRegistryLimit(b.state.Balances) + case randaoMixes: + if b.rebuildTrie[field] { + err := b.resetFieldTrie(field, b.state.RandaoMixes, uint64(params.BeaconConfig().EpochsPerHistoricalVector)) + if err != nil { + return [32]byte{}, err + } + b.dirtyIndices[field] = []uint64{} + delete(b.rebuildTrie, field) + return b.stateFieldLeaves[field].TrieRoot() + } + return b.recomputeFieldTrie(randaoMixes, b.state.RandaoMixes) + case slashings: + return ssz.SlashingsRoot(b.state.Slashings) + case previousEpochParticipationBits: + return stateutil.ParticipationBitsRoot(b.state.PreviousEpochParticipation) + case currentEpochParticipationBits: + return stateutil.ParticipationBitsRoot(b.state.CurrentEpochParticipation) + case justificationBits: + return bytesutil.ToBytes32(b.state.JustificationBits), nil + case previousJustifiedCheckpoint: + return ssz.CheckpointRoot(hasher, b.state.PreviousJustifiedCheckpoint) + case 
currentJustifiedCheckpoint: + return ssz.CheckpointRoot(hasher, b.state.CurrentJustifiedCheckpoint) + case finalizedCheckpoint: + return ssz.CheckpointRoot(hasher, b.state.FinalizedCheckpoint) + case inactivityScores: + return stateutil.Uint64ListRootWithRegistryLimit(b.state.InactivityScores) + case currentSyncCommittee: + return stateutil.SyncCommitteeRoot(b.state.CurrentSyncCommittee) + case nextSyncCommittee: + return stateutil.SyncCommitteeRoot(b.state.NextSyncCommittee) + case latestExecutionPayloadHeader: + return b.state.LatestExecutionPayloadHeader.HashTreeRoot() + } + return [32]byte{}, errors.New("invalid field index provided") +} + +func (b *BeaconState) recomputeFieldTrie(index types.FieldIndex, elements interface{}) ([32]byte, error) { + fTrie := b.stateFieldLeaves[index] + if fTrie.FieldReference().Refs() > 1 { + fTrie.Lock() + defer fTrie.Unlock() + fTrie.FieldReference().MinusRef() + newTrie := fTrie.CopyTrie() + b.stateFieldLeaves[index] = newTrie + fTrie = newTrie + } + // remove duplicate indexes + b.dirtyIndices[index] = slice.SetUint64(b.dirtyIndices[index]) + // sort indexes again + sort.Slice(b.dirtyIndices[index], func(i int, j int) bool { + return b.dirtyIndices[index][i] < b.dirtyIndices[index][j] + }) + root, err := fTrie.RecomputeTrie(b.dirtyIndices[index], elements) + if err != nil { + return [32]byte{}, err + } + b.dirtyIndices[index] = []uint64{} + return root, nil +} + +func (b *BeaconState) resetFieldTrie(index types.FieldIndex, elements interface{}, length uint64) error { + fTrie, err := fieldtrie.NewFieldTrie(index, fieldMap[index], elements, length) + if err != nil { + return err + } + b.stateFieldLeaves[index] = fTrie + b.dirtyIndices[index] = []uint64{} + return nil +} diff --git a/beacon-chain/state/v3/state_trie_test.go b/beacon-chain/state/v3/state_trie_test.go new file mode 100644 index 0000000000..5b72723858 --- /dev/null +++ b/beacon-chain/state/v3/state_trie_test.go @@ -0,0 +1,167 @@ +package v3 + +import ( + "strconv" 
+ "sync" + "testing" + + "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil" + "github.com/prysmaticlabs/prysm/config/features" + "github.com/prysmaticlabs/prysm/config/params" + "github.com/prysmaticlabs/prysm/encoding/bytesutil" + ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" + "github.com/prysmaticlabs/prysm/testing/assert" + "github.com/prysmaticlabs/prysm/testing/require" +) + +func TestMain(m *testing.M) { + resetCfg := features.InitWithReset(&features.Flags{EnableBalanceTrieComputation: true}) + defer resetCfg() + m.Run() +} + +func TestValidatorMap_DistinctCopy(t *testing.T) { + count := uint64(100) + vals := make([]*ethpb.Validator, 0, count) + for i := uint64(1); i < count; i++ { + someRoot := [32]byte{} + someKey := [48]byte{} + copy(someRoot[:], strconv.Itoa(int(i))) + copy(someKey[:], strconv.Itoa(int(i))) + vals = append(vals, &ethpb.Validator{ + PublicKey: someKey[:], + WithdrawalCredentials: someRoot[:], + EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance, + Slashed: false, + ActivationEligibilityEpoch: 1, + ActivationEpoch: 1, + ExitEpoch: 1, + WithdrawableEpoch: 1, + }) + } + handler := stateutil.NewValMapHandler(vals) + newHandler := handler.Copy() + wantedPubkey := strconv.Itoa(22) + handler.Set(bytesutil.ToBytes48([]byte(wantedPubkey)), 27) + val1, _ := handler.Get(bytesutil.ToBytes48([]byte(wantedPubkey))) + val2, _ := newHandler.Get(bytesutil.ToBytes48([]byte(wantedPubkey))) + assert.NotEqual(t, val1, val2, "Values are supposed to be unequal due to copy") +} + +func TestInitializeFromProto(t *testing.T) { + type test struct { + name string + state *ethpb.BeaconStateMerge + error string + } + initTests := []test{ + { + name: "nil state", + state: nil, + error: "received nil state", + }, + { + name: "nil validators", + state: &ethpb.BeaconStateMerge{ + Slot: 4, + Validators: nil, + }, + }, + { + name: "empty state", + state: &ethpb.BeaconStateMerge{}, + }, + } + for _, tt := range initTests { + t.Run(tt.name, func(t 
*testing.T) { + _, err := InitializeFromProto(tt.state) + if tt.error != "" { + require.ErrorContains(t, tt.error, err) + } else { + require.NoError(t, err) + } + }) + } +} + +func TestBeaconState_NoDeadlock(t *testing.T) { + count := uint64(100) + vals := make([]*ethpb.Validator, 0, count) + for i := uint64(1); i < count; i++ { + someRoot := [32]byte{} + someKey := [48]byte{} + copy(someRoot[:], strconv.Itoa(int(i))) + copy(someKey[:], strconv.Itoa(int(i))) + vals = append(vals, &ethpb.Validator{ + PublicKey: someKey[:], + WithdrawalCredentials: someRoot[:], + EffectiveBalance: params.BeaconConfig().MaxEffectiveBalance, + Slashed: false, + ActivationEligibilityEpoch: 1, + ActivationEpoch: 1, + ExitEpoch: 1, + WithdrawableEpoch: 1, + }) + } + st, err := InitializeFromProtoUnsafe(&ethpb.BeaconStateMerge{ + Validators: vals, + }) + assert.NoError(t, err) + + wg := new(sync.WaitGroup) + + wg.Add(1) + go func() { + // Continuously lock and unlock the state + // by acquiring the lock. + for i := 0; i < 1000; i++ { + for _, f := range st.stateFieldLeaves { + f.Lock() + if f.Empty() { + f.InsertFieldLayer(make([][]*[32]byte, 10)) + } + f.Unlock() + f.FieldReference().AddRef() + } + } + wg.Done() + }() + // Constantly read from the offending portion + // of the code to ensure there is no possible + // recursive read locking. + for i := 0; i < 1000; i++ { + go func() { + _ = st.FieldReferencesCount() + }() + } + // Test will not terminate in the event of a deadlock. + wg.Wait() +} + +func TestInitializeFromProtoUnsafe(t *testing.T) { + type test struct { + name string + state *ethpb.BeaconStateMerge + error string + } + initTests := []test{ + { + name: "nil state", + state: nil, + error: "received nil state", + }, + { + name: "nil validators", + state: &ethpb.BeaconStateMerge{ + Slot: 4, + Validators: nil, + }, + }, + { + name: "empty state", + state: &ethpb.BeaconStateMerge{}, + }, + // TODO: Add full state. Blocked by testutil migration. 
+ } + _ = initTests +} From 85faecf2ca826a8149c49cfe4faad4a061714ebe Mon Sep 17 00:00:00 2001 From: terence tsao Date: Fri, 26 Nov 2021 07:53:25 -0800 Subject: [PATCH 26/45] Add test utility merge state (#9944) * Add test utility merge state * gaz * gaz --- beacon-chain/state/v3/BUILD.bazel | 5 +- testing/util/BUILD.bazel | 3 ++ testing/util/merge_state.go | 83 +++++++++++++++++++++++++++++++ testing/util/merge_state_test.go | 28 +++++++++++ 4 files changed, 118 insertions(+), 1 deletion(-) create mode 100644 testing/util/merge_state.go create mode 100644 testing/util/merge_state_test.go diff --git a/beacon-chain/state/v3/BUILD.bazel b/beacon-chain/state/v3/BUILD.bazel index 94784f1858..1f7735d4eb 100644 --- a/beacon-chain/state/v3/BUILD.bazel +++ b/beacon-chain/state/v3/BUILD.bazel @@ -33,7 +33,10 @@ go_library( "types.go", ], importpath = "github.com/prysmaticlabs/prysm/beacon-chain/state/v3", - visibility = ["//beacon-chain:__pkg__"], + visibility = [ + "//beacon-chain:__subpackages__", + "//testing/util:__pkg__", + ], deps = [ "//beacon-chain/state:go_default_library", "//beacon-chain/state/fieldtrie:go_default_library", diff --git a/testing/util/BUILD.bazel b/testing/util/BUILD.bazel index 9667aa08de..4035f60600 100644 --- a/testing/util/BUILD.bazel +++ b/testing/util/BUILD.bazel @@ -10,6 +10,7 @@ go_library( "block.go", "deposits.go", "helpers.go", + "merge_state.go", "state.go", "sync_aggregate.go", "sync_committee.go", @@ -28,6 +29,7 @@ go_library( "//beacon-chain/state:go_default_library", "//beacon-chain/state/v1:go_default_library", "//beacon-chain/state/v2:go_default_library", + "//beacon-chain/state/v3:go_default_library", "//config/params:go_default_library", "//container/trie:go_default_library", "//crypto/bls:go_default_library", @@ -57,6 +59,7 @@ go_test( "block_test.go", "deposits_test.go", "helpers_test.go", + "merge_state_test.go", "state_test.go", ], embed = [":go_default_library"], diff --git a/testing/util/merge_state.go 
b/testing/util/merge_state.go new file mode 100644 index 0000000000..bdba409a61 --- /dev/null +++ b/testing/util/merge_state.go @@ -0,0 +1,83 @@ +package util + +import ( + "context" + "testing" + + "github.com/pkg/errors" + "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" + "github.com/prysmaticlabs/prysm/beacon-chain/state" + v3 "github.com/prysmaticlabs/prysm/beacon-chain/state/v3" + "github.com/prysmaticlabs/prysm/config/params" + "github.com/prysmaticlabs/prysm/crypto/bls" + ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" +) + +// DeterministicGenesisStateMerge returns a genesis state in Merge format made using the deterministic deposits. +func DeterministicGenesisStateMerge(t testing.TB, numValidators uint64) (state.BeaconState, []bls.SecretKey) { + deposits, privKeys, err := DeterministicDepositsAndKeys(numValidators) + if err != nil { + t.Fatal(errors.Wrapf(err, "failed to get %d deposits", numValidators)) + } + eth1Data, err := DeterministicEth1Data(len(deposits)) + if err != nil { + t.Fatal(errors.Wrapf(err, "failed to get eth1data for %d deposits", numValidators)) + } + beaconState, err := genesisBeaconStateMerge(context.Background(), deposits, uint64(0), eth1Data) + if err != nil { + t.Fatal(errors.Wrapf(err, "failed to get genesis beacon state of %d validators", numValidators)) + } + resetCache() + return beaconState, privKeys +} + +// genesisBeaconStateMerge returns the genesis beacon state. +func genesisBeaconStateMerge(ctx context.Context, deposits []*ethpb.Deposit, genesisTime uint64, eth1Data *ethpb.Eth1Data) (state.BeaconState, error) { + st, err := emptyGenesisStateMerge() + if err != nil { + return nil, err + } + + // Process initial deposits. 
+ st, err = helpers.UpdateGenesisEth1Data(st, deposits, eth1Data) + if err != nil { + return nil, err + } + + st, err = processPreGenesisDeposits(ctx, st, deposits) + if err != nil { + return nil, errors.Wrap(err, "could not process validator deposits") + } + + return buildGenesisBeaconState(genesisTime, st, st.Eth1Data()) +} + +// emptyGenesisStateMerge returns an empty genesis state in Merge format. +func emptyGenesisStateMerge() (state.BeaconState, error) { + st := &ethpb.BeaconStateMerge{ + // Misc fields. + Slot: 0, + Fork: &ethpb.Fork{ + PreviousVersion: params.BeaconConfig().GenesisForkVersion, + CurrentVersion: params.BeaconConfig().AltairForkVersion, + Epoch: 0, + }, + // Validator registry fields. + Validators: []*ethpb.Validator{}, + Balances: []uint64{}, + InactivityScores: []uint64{}, + + JustificationBits: []byte{0}, + HistoricalRoots: [][]byte{}, + CurrentEpochParticipation: []byte{}, + PreviousEpochParticipation: []byte{}, + + // Eth1 data. + Eth1Data: &ethpb.Eth1Data{}, + Eth1DataVotes: []*ethpb.Eth1Data{}, + Eth1DepositIndex: 0, + + LatestExecutionPayloadHeader: &ethpb.ExecutionPayloadHeader{}, + } + return v3.InitializeFromProto(st) +} diff --git a/testing/util/merge_state_test.go b/testing/util/merge_state_test.go new file mode 100644 index 0000000000..1d4abaa372 --- /dev/null +++ b/testing/util/merge_state_test.go @@ -0,0 +1,28 @@ +package util + +import ( + "context" + "testing" + + "github.com/prysmaticlabs/prysm/config/params" + "github.com/prysmaticlabs/prysm/testing/require" +) + +func TestDeterministicGenesisStateMerge(t *testing.T) { + st, k := DeterministicGenesisStateMerge(t, params.BeaconConfig().MaxCommitteesPerSlot) + require.Equal(t, params.BeaconConfig().MaxCommitteesPerSlot, uint64(len(k))) + require.Equal(t, params.BeaconConfig().MaxCommitteesPerSlot, uint64(st.NumValidators())) +} + +func TestGenesisBeaconStateMerge(t *testing.T) { + ctx := context.Background() + deposits, _, err := 
DeterministicDepositsAndKeys(params.BeaconConfig().MaxCommitteesPerSlot) + require.NoError(t, err) + eth1Data, err := DeterministicEth1Data(len(deposits)) + require.NoError(t, err) + gt := uint64(10000) + st, err := genesisBeaconStateMerge(ctx, deposits, gt, eth1Data) + require.NoError(t, err) + require.Equal(t, gt, st.GenesisTime()) + require.Equal(t, params.BeaconConfig().MaxCommitteesPerSlot, uint64(st.NumValidators())) +} From 5983d0a397d33954bf98eb655b5fa7f6f6b2cd9a Mon Sep 17 00:00:00 2001 From: Potuz Date: Sun, 28 Nov 2021 14:34:24 -0300 Subject: [PATCH 27/45] Allow requests for next sync committee (#9945) * Allow requests for next sync committee * fix deepsource and variable rename * Minor cleanup * Potuz's comments Co-authored-by: terence tsao --- beacon-chain/rpc/eth/beacon/sync_committee.go | 75 +++++++++++++++- .../rpc/eth/beacon/sync_committee_test.go | 86 +++++++++++++++++++ beacon-chain/rpc/testutil/BUILD.bazel | 11 ++- .../rpc/testutil/mock_genesis_timefetcher.go | 21 +++++ 4 files changed, 187 insertions(+), 6 deletions(-) create mode 100644 beacon-chain/rpc/testutil/mock_genesis_timefetcher.go diff --git a/beacon-chain/rpc/eth/beacon/sync_committee.go b/beacon-chain/rpc/eth/beacon/sync_committee.go index 97e89ac45b..d2e435704b 100644 --- a/beacon-chain/rpc/eth/beacon/sync_committee.go +++ b/beacon-chain/rpc/eth/beacon/sync_committee.go @@ -16,6 +16,7 @@ import ( "github.com/prysmaticlabs/prysm/proto/eth/v2" ethpbv2 "github.com/prysmaticlabs/prysm/proto/eth/v2" ethpbalpha "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" + "github.com/prysmaticlabs/prysm/time/slots" "go.opencensus.io/trace" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -27,6 +28,40 @@ func (bs *Server) ListSyncCommittees(ctx context.Context, req *ethpbv2.StateSync ctx, span := trace.StartSpan(ctx, "beacon.ListSyncCommittees") defer span.End() + currentSlot := bs.GenesisTimeFetcher.CurrentSlot() + currentEpoch := slots.ToEpoch(currentSlot) + 
currentPeriodStartEpoch, err := slots.SyncCommitteePeriodStartEpoch(currentEpoch) + if err != nil { + return nil, status.Errorf( + codes.Internal, + "Could not calculate start period for slot %d: %v", + currentSlot, + err, + ) + } + + var reqPeriodStartEpoch types.Epoch + if req.Epoch == nil { + reqPeriodStartEpoch = currentPeriodStartEpoch + } else { + reqPeriodStartEpoch, err = slots.SyncCommitteePeriodStartEpoch(*req.Epoch) + if err != nil { + return nil, status.Errorf( + codes.Internal, + "Could not calculate start period for epoch %d: %v", + *req.Epoch, + err, + ) + } + if reqPeriodStartEpoch > currentPeriodStartEpoch+params.BeaconConfig().EpochsPerSyncCommitteePeriod { + return nil, status.Errorf( + codes.Internal, + "Could not fetch sync committee too far in the future. Requested epoch: %d, current epoch: %d", + *req.Epoch, currentEpoch, + ) + } + } + st, err := bs.stateFromRequest(ctx, &stateRequest{ epoch: req.Epoch, stateId: req.StateId, @@ -35,10 +70,20 @@ func (bs *Server) ListSyncCommittees(ctx context.Context, req *ethpbv2.StateSync return nil, status.Errorf(codes.Internal, "Could not fetch beacon state using request: %v", err) } - // Get the current sync committee and sync committee indices from the state. - committeeIndices, committee, err := currentCommitteeIndicesFromState(st) - if err != nil { - return nil, status.Errorf(codes.Internal, "Could not get sync committee indices from state: %v", err) + var committeeIndices []types.ValidatorIndex + var committee *ethpbalpha.SyncCommittee + if reqPeriodStartEpoch > currentPeriodStartEpoch { + // Get the next sync committee and sync committee indices from the state. + committeeIndices, committee, err = nextCommitteeIndicesFromState(st) + if err != nil { + return nil, status.Errorf(codes.Internal, "Could not get next sync committee indices: %v", err) + } + } else { + // Get the current sync committee and sync committee indices from the state. 
+ committeeIndices, committee, err = currentCommitteeIndicesFromState(st) + if err != nil { + return nil, status.Errorf(codes.Internal, "Could not get current sync committee indices: %v", err) + } } subcommittees, err := extractSyncSubcommittees(st, committee) if err != nil { @@ -75,6 +120,28 @@ func currentCommitteeIndicesFromState(st state.BeaconState) ([]types.ValidatorIn return committeeIndices, committee, nil } +func nextCommitteeIndicesFromState(st state.BeaconState) ([]types.ValidatorIndex, *ethpbalpha.SyncCommittee, error) { + committee, err := st.NextSyncCommittee() + if err != nil { + return nil, nil, fmt.Errorf( + "could not get sync committee: %v", err, + ) + } + + committeeIndices := make([]types.ValidatorIndex, len(committee.Pubkeys)) + for i, key := range committee.Pubkeys { + index, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(key)) + if !ok { + return nil, nil, fmt.Errorf( + "validator index not found for pubkey %#x", + bytesutil.Trunc(key), + ) + } + committeeIndices[i] = index + } + return committeeIndices, committee, nil +} + func extractSyncSubcommittees(st state.BeaconState, committee *ethpbalpha.SyncCommittee) ([]*eth.SyncSubcommitteeValidators, error) { subcommitteeCount := params.BeaconConfig().SyncCommitteeSubnetCount subcommittees := make([]*ethpbv2.SyncSubcommitteeValidators, subcommitteeCount) diff --git a/beacon-chain/rpc/eth/beacon/sync_committee_test.go b/beacon-chain/rpc/eth/beacon/sync_committee_test.go index fb17b68caa..245066ada4 100644 --- a/beacon-chain/rpc/eth/beacon/sync_committee_test.go +++ b/beacon-chain/rpc/eth/beacon/sync_committee_test.go @@ -4,6 +4,7 @@ import ( "context" "strings" "testing" + "time" "github.com/grpc-ecosystem/grpc-gateway/v2/runtime" types "github.com/prysmaticlabs/eth2-types" @@ -55,6 +56,37 @@ func Test_currentCommitteeIndicesFromState(t *testing.T) { }) } +func Test_nextCommitteeIndicesFromState(t *testing.T) { + st, _ := util.DeterministicGenesisStateAltair(t, 
params.BeaconConfig().SyncCommitteeSize) + vals := st.Validators() + wantedCommittee := make([][]byte, params.BeaconConfig().SyncCommitteeSize) + wantedIndices := make([]types.ValidatorIndex, len(wantedCommittee)) + for i := 0; i < len(wantedCommittee); i++ { + wantedIndices[i] = types.ValidatorIndex(i) + wantedCommittee[i] = vals[i].PublicKey + } + require.NoError(t, st.SetNextSyncCommittee(&ethpbalpha.SyncCommittee{ + Pubkeys: wantedCommittee, + AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength), + })) + + t.Run("OK", func(t *testing.T) { + indices, committee, err := nextCommitteeIndicesFromState(st) + require.NoError(t, err) + require.DeepEqual(t, wantedIndices, indices) + require.DeepEqual(t, wantedCommittee, committee.Pubkeys) + }) + t.Run("validator in committee not found in state", func(t *testing.T) { + wantedCommittee[0] = bytesutil.PadTo([]byte("fakepubkey"), 48) + require.NoError(t, st.SetNextSyncCommittee(&ethpbalpha.SyncCommittee{ + Pubkeys: wantedCommittee, + AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength), + })) + _, _, err := nextCommitteeIndicesFromState(st) + require.ErrorContains(t, "index not found for pubkey", err) + }) +} + func Test_extractSyncSubcommittees(t *testing.T) { st, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().SyncCommitteeSize) vals := st.Validators() @@ -123,6 +155,9 @@ func TestListSyncCommittees(t *testing.T) { require.NoError(t, err) s := &Server{ + GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{ + Genesis: time.Now(), + }, StateFetcher: &testutil.MockFetcher{ BeaconState: st, }, @@ -150,6 +185,57 @@ func TestListSyncCommittees(t *testing.T) { } } +func TestListSyncCommitteesFuture(t *testing.T) { + ctx := context.Background() + st, _ := util.DeterministicGenesisStateAltair(t, params.BeaconConfig().SyncCommitteeSize) + syncCommittee := make([][]byte, params.BeaconConfig().SyncCommitteeSize) + vals := st.Validators() + for i := 0; i < 
len(syncCommittee); i++ { + syncCommittee[i] = vals[i].PublicKey + } + require.NoError(t, st.SetNextSyncCommittee(&ethpbalpha.SyncCommittee{ + Pubkeys: syncCommittee, + AggregatePubkey: bytesutil.PadTo([]byte{}, params.BeaconConfig().BLSPubkeyLength), + })) + + s := &Server{ + GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{ + Genesis: time.Now(), + }, + StateFetcher: &testutil.MockFetcher{ + BeaconState: st, + }, + } + req := &ethpbv2.StateSyncCommitteesRequest{} + epoch := 2 * params.BeaconConfig().EpochsPerSyncCommitteePeriod + req.Epoch = &epoch + _, err := s.ListSyncCommittees(ctx, req) + require.ErrorContains(t, "Could not fetch sync committee too far in the future", err) + + epoch = 2*params.BeaconConfig().EpochsPerSyncCommitteePeriod - 1 + resp, err := s.ListSyncCommittees(ctx, req) + require.NoError(t, err) + + require.NotNil(t, resp.Data) + committeeVals := resp.Data.Validators + require.NotNil(t, committeeVals) + require.Equal(t, params.BeaconConfig().SyncCommitteeSize, uint64(len(committeeVals)), "incorrect committee size") + for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSize; i++ { + assert.Equal(t, types.ValidatorIndex(i), committeeVals[i]) + } + require.NotNil(t, resp.Data.ValidatorAggregates) + assert.Equal(t, params.BeaconConfig().SyncCommitteeSubnetCount, uint64(len(resp.Data.ValidatorAggregates))) + for i := uint64(0); i < params.BeaconConfig().SyncCommitteeSubnetCount; i++ { + vStartIndex := types.ValidatorIndex(params.BeaconConfig().SyncCommitteeSize / params.BeaconConfig().SyncCommitteeSubnetCount * i) + vEndIndex := types.ValidatorIndex(params.BeaconConfig().SyncCommitteeSize/params.BeaconConfig().SyncCommitteeSubnetCount*(i+1) - 1) + j := 0 + for vIndex := vStartIndex; vIndex <= vEndIndex; vIndex++ { + assert.Equal(t, vIndex, resp.Data.ValidatorAggregates[i].Validators[j]) + j++ + } + } +} + func TestSubmitPoolSyncCommitteeSignatures(t *testing.T) { ctx := grpc.NewContextWithServerTransportStream(context.Background(), 
&runtime.ServerTransportStream{}) st, _ := util.DeterministicGenesisStateAltair(t, 10) diff --git a/beacon-chain/rpc/testutil/BUILD.bazel b/beacon-chain/rpc/testutil/BUILD.bazel index 904cc55dd1..4714040101 100644 --- a/beacon-chain/rpc/testutil/BUILD.bazel +++ b/beacon-chain/rpc/testutil/BUILD.bazel @@ -3,8 +3,15 @@ load("@prysm//tools/go:def.bzl", "go_library") go_library( name = "go_default_library", testonly = True, - srcs = ["mock_state_fetcher.go"], + srcs = [ + "mock_genesis_timefetcher.go", + "mock_state_fetcher.go", + ], importpath = "github.com/prysmaticlabs/prysm/beacon-chain/rpc/testutil", visibility = ["//beacon-chain:__subpackages__"], - deps = ["//beacon-chain/state:go_default_library"], + deps = [ + "//beacon-chain/state:go_default_library", + "//config/params:go_default_library", + "@com_github_prysmaticlabs_eth2_types//:go_default_library", + ], ) diff --git a/beacon-chain/rpc/testutil/mock_genesis_timefetcher.go b/beacon-chain/rpc/testutil/mock_genesis_timefetcher.go new file mode 100644 index 0000000000..7d54c7a049 --- /dev/null +++ b/beacon-chain/rpc/testutil/mock_genesis_timefetcher.go @@ -0,0 +1,21 @@ +package testutil + +import ( + "time" + + types "github.com/prysmaticlabs/eth2-types" + "github.com/prysmaticlabs/prysm/config/params" +) + +// MockGenesisTimeFetcher is a fake implementation of the blockchain.TimeFetcher +type MockGenesisTimeFetcher struct { + Genesis time.Time +} + +func (m *MockGenesisTimeFetcher) GenesisTime() time.Time { + return m.Genesis +} + +func (m *MockGenesisTimeFetcher) CurrentSlot() types.Slot { + return types.Slot(uint64(time.Now().Unix()-m.Genesis.Unix()) / params.BeaconConfig().SecondsPerSlot) +} From 37bc407b56a2d72e9fb6448ccb92e936267eeed5 Mon Sep 17 00:00:00 2001 From: Nishant Das Date: Tue, 30 Nov 2021 00:30:17 +0800 Subject: [PATCH 28/45] Refactor States To Allow for Single Cached Hasher (#9922) * initial changes * gaz * unexport and add in godoc * nocache * fix edge case * fix bad implementation * fix 
build file * add it in * terence's review * gaz * fix build * Apply suggestions from code review remove assigned ctx Co-authored-by: terence tsao --- beacon-chain/core/transition/BUILD.bazel | 1 + beacon-chain/core/transition/state.go | 3 +- beacon-chain/state/fieldtrie/BUILD.bazel | 1 - .../state/fieldtrie/field_trie_test.go | 6 +- beacon-chain/state/stateutil/BUILD.bazel | 10 +- beacon-chain/state/stateutil/eth1_root.go | 4 +- .../state/stateutil/field_root_attestation.go | 83 +++ .../{v2 => stateutil}/field_root_eth1.go | 23 +- .../{v3 => stateutil}/field_root_test.go | 12 +- .../{v3 => stateutil}/field_root_validator.go | 27 +- .../{v3 => stateutil}/field_root_vector.go | 18 +- .../stateutil/pending_attestation_root.go | 4 +- beacon-chain/state/stateutil/state_hasher.go | 547 ++++++++++++++++++ .../state/stateutil/trie_helpers_test.go | 9 +- .../state/stateutil/validator_root.go | 4 +- beacon-chain/state/v1/BUILD.bazel | 1 - beacon-chain/state/v1/field_roots.go | 190 +----- beacon-chain/state/v1/getters_attestation.go | 69 --- beacon-chain/state/v1/getters_eth1.go | 54 -- beacon-chain/state/v1/getters_misc.go | 152 ----- beacon-chain/state/v1/getters_validator.go | 82 --- .../state/v1/getters_validator_test.go | 16 - beacon-chain/state/v1/state_trie.go | 2 +- beacon-chain/state/v2/BUILD.bazel | 5 - beacon-chain/state/v2/field_root_test.go | 23 - beacon-chain/state/v2/field_root_validator.go | 78 --- beacon-chain/state/v2/field_root_vector.go | 146 ----- beacon-chain/state/v2/field_roots.go | 212 +------ beacon-chain/state/v2/state_trie.go | 2 +- beacon-chain/state/v3/BUILD.bazel | 5 - beacon-chain/state/v3/field_root_eth1.go | 59 -- beacon-chain/state/v3/field_roots.go | 220 +------ beacon-chain/state/v3/state_trie.go | 2 +- testing/util/BUILD.bazel | 1 + testing/util/altair.go | 4 +- 35 files changed, 712 insertions(+), 1363 deletions(-) create mode 100644 beacon-chain/state/stateutil/field_root_attestation.go rename beacon-chain/state/{v2 => 
stateutil}/field_root_eth1.go (61%) rename beacon-chain/state/{v3 => stateutil}/field_root_test.go (52%) rename beacon-chain/state/{v3 => stateutil}/field_root_validator.go (88%) rename beacon-chain/state/{v3 => stateutil}/field_root_vector.go (89%) create mode 100644 beacon-chain/state/stateutil/state_hasher.go delete mode 100644 beacon-chain/state/v2/field_root_test.go delete mode 100644 beacon-chain/state/v2/field_root_validator.go delete mode 100644 beacon-chain/state/v2/field_root_vector.go delete mode 100644 beacon-chain/state/v3/field_root_eth1.go diff --git a/beacon-chain/core/transition/BUILD.bazel b/beacon-chain/core/transition/BUILD.bazel index 1da96a65ca..d399c5cef8 100644 --- a/beacon-chain/core/transition/BUILD.bazel +++ b/beacon-chain/core/transition/BUILD.bazel @@ -35,6 +35,7 @@ go_library( "//beacon-chain/core/transition/interop:go_default_library", "//beacon-chain/core/validators:go_default_library", "//beacon-chain/state:go_default_library", + "//beacon-chain/state/stateutil:go_default_library", "//beacon-chain/state/v1:go_default_library", "//config/features:go_default_library", "//config/params:go_default_library", diff --git a/beacon-chain/core/transition/state.go b/beacon-chain/core/transition/state.go index 993727a814..11b293fb69 100644 --- a/beacon-chain/core/transition/state.go +++ b/beacon-chain/core/transition/state.go @@ -7,6 +7,7 @@ import ( b "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks" "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" "github.com/prysmaticlabs/prysm/beacon-chain/state" + "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil" v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1" "github.com/prysmaticlabs/prysm/config/params" ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" @@ -105,7 +106,7 @@ func OptimizedGenesisBeaconState(genesisTime uint64, preState state.BeaconState, slashings := make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector) - genesisValidatorsRoot, err 
:= v1.ValidatorRegistryRoot(preState.Validators()) + genesisValidatorsRoot, err := stateutil.ValidatorRegistryRoot(preState.Validators()) if err != nil { return nil, errors.Wrapf(err, "could not hash tree root genesis validators %v", err) } diff --git a/beacon-chain/state/fieldtrie/BUILD.bazel b/beacon-chain/state/fieldtrie/BUILD.bazel index fcea708041..57833694e7 100644 --- a/beacon-chain/state/fieldtrie/BUILD.bazel +++ b/beacon-chain/state/fieldtrie/BUILD.bazel @@ -30,7 +30,6 @@ go_test( deps = [ "//beacon-chain/state/stateutil:go_default_library", "//beacon-chain/state/types:go_default_library", - "//beacon-chain/state/v1:go_default_library", "//config/params:go_default_library", "//proto/prysm/v1alpha1:go_default_library", "//testing/assert:go_default_library", diff --git a/beacon-chain/state/fieldtrie/field_trie_test.go b/beacon-chain/state/fieldtrie/field_trie_test.go index 5c566e0c7a..1551b74650 100644 --- a/beacon-chain/state/fieldtrie/field_trie_test.go +++ b/beacon-chain/state/fieldtrie/field_trie_test.go @@ -5,8 +5,8 @@ import ( types "github.com/prysmaticlabs/eth2-types" "github.com/prysmaticlabs/prysm/beacon-chain/state/fieldtrie" + "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil" stateTypes "github.com/prysmaticlabs/prysm/beacon-chain/state/types" - v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1" "github.com/prysmaticlabs/prysm/config/params" ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" "github.com/prysmaticlabs/prysm/testing/assert" @@ -20,7 +20,7 @@ func TestFieldTrie_NewTrie(t *testing.T) { // 5 represents the enum value of state roots trie, err := fieldtrie.NewFieldTrie(5, stateTypes.BasicArray, newState.StateRoots(), uint64(params.BeaconConfig().SlotsPerHistoricalRoot)) require.NoError(t, err) - root, err := v1.RootsArrayHashTreeRoot(newState.StateRoots(), uint64(params.BeaconConfig().SlotsPerHistoricalRoot), "StateRoots") + root, err := stateutil.RootsArrayHashTreeRoot(newState.StateRoots(), 
uint64(params.BeaconConfig().SlotsPerHistoricalRoot), "StateRoots") require.NoError(t, err) newRoot, err := trie.TrieRoot() require.NoError(t, err) @@ -48,7 +48,7 @@ func TestFieldTrie_RecomputeTrie(t *testing.T) { require.NoError(t, newState.UpdateValidatorAtIndex(types.ValidatorIndex(changedIdx[0]), changedVals[0])) require.NoError(t, newState.UpdateValidatorAtIndex(types.ValidatorIndex(changedIdx[1]), changedVals[1])) - expectedRoot, err := v1.ValidatorRegistryRoot(newState.Validators()) + expectedRoot, err := stateutil.ValidatorRegistryRoot(newState.Validators()) require.NoError(t, err) root, err := trie.RecomputeTrie(changedIdx, newState.Validators()) require.NoError(t, err) diff --git a/beacon-chain/state/stateutil/BUILD.bazel b/beacon-chain/state/stateutil/BUILD.bazel index 8b83cb2ca9..67da4b457a 100644 --- a/beacon-chain/state/stateutil/BUILD.bazel +++ b/beacon-chain/state/stateutil/BUILD.bazel @@ -5,9 +5,14 @@ go_library( srcs = [ "block_header_root.go", "eth1_root.go", + "field_root_attestation.go", + "field_root_eth1.go", + "field_root_validator.go", + "field_root_vector.go", "participation_bit_root.go", "pending_attestation_root.go", "reference.go", + "state_hasher.go", "sync_committee.root.go", "trie_helpers.go", "validator_map_handler.go", @@ -28,6 +33,7 @@ go_library( ], deps = [ "//beacon-chain/core/transition/stateutils:go_default_library", + "//config/features:go_default_library", "//config/params:go_default_library", "//container/trie:go_default_library", "//crypto/hash:go_default_library", @@ -35,8 +41,10 @@ go_library( "//encoding/ssz:go_default_library", "//math:go_default_library", "//proto/prysm/v1alpha1:go_default_library", + "@com_github_dgraph_io_ristretto//:go_default_library", "@com_github_pkg_errors//:go_default_library", "@com_github_prysmaticlabs_eth2_types//:go_default_library", + "@io_opencensus_go//trace:go_default_library", ], ) @@ -44,6 +52,7 @@ go_test( name = "go_default_test", srcs = [ "benchmark_test.go", + 
"field_root_test.go", "reference_bench_test.go", "state_root_test.go", "stateutil_test.go", @@ -51,7 +60,6 @@ go_test( ], embed = [":go_default_library"], deps = [ - "//beacon-chain/state/v1:go_default_library", "//config/features:go_default_library", "//config/params:go_default_library", "//crypto/hash:go_default_library", diff --git a/beacon-chain/state/stateutil/eth1_root.go b/beacon-chain/state/stateutil/eth1_root.go index 987cd702db..fc06ea8d47 100644 --- a/beacon-chain/state/stateutil/eth1_root.go +++ b/beacon-chain/state/stateutil/eth1_root.go @@ -12,9 +12,9 @@ import ( ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" ) -// Eth1DataEncKey returns the encoded key in bytes of input `eth1Data`, +// eth1DataEncKey returns the encoded key in bytes of input `eth1Data`, // the returned key bytes can be used for caching purposes. -func Eth1DataEncKey(eth1Data *ethpb.Eth1Data) []byte { +func eth1DataEncKey(eth1Data *ethpb.Eth1Data) []byte { enc := make([]byte, 0, 96) if eth1Data != nil { if len(eth1Data.DepositRoot) > 0 { diff --git a/beacon-chain/state/stateutil/field_root_attestation.go b/beacon-chain/state/stateutil/field_root_attestation.go new file mode 100644 index 0000000000..94d2ef21e0 --- /dev/null +++ b/beacon-chain/state/stateutil/field_root_attestation.go @@ -0,0 +1,83 @@ +package stateutil + +import ( + "bytes" + "encoding/binary" + "fmt" + + "github.com/pkg/errors" + "github.com/prysmaticlabs/prysm/config/features" + "github.com/prysmaticlabs/prysm/config/params" + "github.com/prysmaticlabs/prysm/crypto/hash" + "github.com/prysmaticlabs/prysm/encoding/ssz" + ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" +) + +// RootsArrayHashTreeRoot computes the Merkle root of arrays of 32-byte hashes, such as [64][32]byte +// according to the Simple Serialize specification of Ethereum. 
+func RootsArrayHashTreeRoot(vals [][]byte, length uint64, fieldName string) ([32]byte, error) { + if features.Get().EnableSSZCache { + return CachedHasher.arraysRoot(vals, length, fieldName) + } + return NocachedHasher.arraysRoot(vals, length, fieldName) +} + +func (h *stateRootHasher) epochAttestationsRoot(atts []*ethpb.PendingAttestation) ([32]byte, error) { + max := uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().MaxAttestations + if uint64(len(atts)) > max { + return [32]byte{}, fmt.Errorf("epoch attestation exceeds max length %d", max) + } + + hasher := hash.CustomSHA256Hasher() + roots := make([][]byte, len(atts)) + for i := 0; i < len(atts); i++ { + pendingRoot, err := h.pendingAttestationRoot(hasher, atts[i]) + if err != nil { + return [32]byte{}, errors.Wrap(err, "could not attestation merkleization") + } + roots[i] = pendingRoot[:] + } + + attsRootsRoot, err := ssz.BitwiseMerkleize( + hasher, + roots, + uint64(len(roots)), + uint64(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().MaxAttestations)), + ) + if err != nil { + return [32]byte{}, errors.Wrap(err, "could not compute epoch attestations merkleization") + } + attsLenBuf := new(bytes.Buffer) + if err := binary.Write(attsLenBuf, binary.LittleEndian, uint64(len(atts))); err != nil { + return [32]byte{}, errors.Wrap(err, "could not marshal epoch attestations length") + } + // We need to mix in the length of the slice. + attsLenRoot := make([]byte, 32) + copy(attsLenRoot, attsLenBuf.Bytes()) + res := ssz.MixInLength(attsRootsRoot, attsLenRoot) + return res, nil +} + +func (h *stateRootHasher) pendingAttestationRoot(hasher ssz.HashFn, att *ethpb.PendingAttestation) ([32]byte, error) { + if att == nil { + return [32]byte{}, errors.New("nil pending attestation") + } + // Marshal attestation to determine if it exists in the cache. 
+ enc := pendingAttEncKey(att) + + // Check if it exists in cache: + if h.rootsCache != nil { + if found, ok := h.rootsCache.Get(string(enc)); found != nil && ok { + return found.([32]byte), nil + } + } + + res, err := PendingAttRootWithHasher(hasher, att) + if err != nil { + return [32]byte{}, err + } + if h.rootsCache != nil { + h.rootsCache.Set(string(enc), res, 32) + } + return res, nil +} diff --git a/beacon-chain/state/v2/field_root_eth1.go b/beacon-chain/state/stateutil/field_root_eth1.go similarity index 61% rename from beacon-chain/state/v2/field_root_eth1.go rename to beacon-chain/state/stateutil/field_root_eth1.go index 1f805966ab..de536fb23e 100644 --- a/beacon-chain/state/v2/field_root_eth1.go +++ b/beacon-chain/state/stateutil/field_root_eth1.go @@ -1,35 +1,34 @@ -package v2 +package stateutil import ( "github.com/pkg/errors" - "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil" "github.com/prysmaticlabs/prysm/config/features" "github.com/prysmaticlabs/prysm/encoding/ssz" ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" ) -// eth1Root computes the HashTreeRoot Merkleization of +// Eth1Root computes the HashTreeRoot Merkleization of // a BeaconBlockHeader struct according to the eth2 // Simple Serialize specification. 
-func eth1Root(hasher ssz.HashFn, eth1Data *ethpb.Eth1Data) ([32]byte, error) { +func Eth1Root(hasher ssz.HashFn, eth1Data *ethpb.Eth1Data) ([32]byte, error) { if eth1Data == nil { return [32]byte{}, errors.New("nil eth1 data") } - enc := stateutil.Eth1DataEncKey(eth1Data) + enc := eth1DataEncKey(eth1Data) if features.Get().EnableSSZCache { - if found, ok := cachedHasher.rootsCache.Get(string(enc)); ok && found != nil { + if found, ok := CachedHasher.rootsCache.Get(string(enc)); ok && found != nil { return found.([32]byte), nil } } - root, err := stateutil.Eth1DataRootWithHasher(hasher, eth1Data) + root, err := Eth1DataRootWithHasher(hasher, eth1Data) if err != nil { return [32]byte{}, err } if features.Get().EnableSSZCache { - cachedHasher.rootsCache.Set(string(enc), root, 32) + CachedHasher.rootsCache.Set(string(enc), root, 32) } return root, nil } @@ -38,22 +37,22 @@ func eth1Root(hasher ssz.HashFn, eth1Data *ethpb.Eth1Data) ([32]byte, error) { // a list of Eth1Data structs according to the eth2 // Simple Serialize specification. 
func eth1DataVotesRoot(eth1DataVotes []*ethpb.Eth1Data) ([32]byte, error) { - hashKey, err := stateutil.Eth1DatasEncKey(eth1DataVotes) + hashKey, err := Eth1DatasEncKey(eth1DataVotes) if err != nil { return [32]byte{}, err } if features.Get().EnableSSZCache { - if found, ok := cachedHasher.rootsCache.Get(string(hashKey[:])); ok && found != nil { + if found, ok := CachedHasher.rootsCache.Get(string(hashKey[:])); ok && found != nil { return found.([32]byte), nil } } - root, err := stateutil.Eth1DatasRoot(eth1DataVotes) + root, err := Eth1DatasRoot(eth1DataVotes) if err != nil { return [32]byte{}, err } if features.Get().EnableSSZCache { - cachedHasher.rootsCache.Set(string(hashKey[:]), root, 32) + CachedHasher.rootsCache.Set(string(hashKey[:]), root, 32) } return root, nil } diff --git a/beacon-chain/state/v3/field_root_test.go b/beacon-chain/state/stateutil/field_root_test.go similarity index 52% rename from beacon-chain/state/v3/field_root_test.go rename to beacon-chain/state/stateutil/field_root_test.go index 54c8feda18..3d74deb89e 100644 --- a/beacon-chain/state/v3/field_root_test.go +++ b/beacon-chain/state/stateutil/field_root_test.go @@ -1,4 +1,4 @@ -package v3 +package stateutil import ( "testing" @@ -7,17 +7,17 @@ import ( ) func TestArraysTreeRoot_OnlyPowerOf2(t *testing.T) { - _, err := nocachedHasher.arraysRoot([][]byte{}, 1, "testing") + _, err := NocachedHasher.arraysRoot([][]byte{}, 1, "testing") assert.NoError(t, err) - _, err = nocachedHasher.arraysRoot([][]byte{}, 4, "testing") + _, err = NocachedHasher.arraysRoot([][]byte{}, 4, "testing") assert.NoError(t, err) - _, err = nocachedHasher.arraysRoot([][]byte{}, 8, "testing") + _, err = NocachedHasher.arraysRoot([][]byte{}, 8, "testing") assert.NoError(t, err) - _, err = nocachedHasher.arraysRoot([][]byte{}, 10, "testing") + _, err = NocachedHasher.arraysRoot([][]byte{}, 10, "testing") assert.ErrorContains(t, "hash layer is a non power of 2", err) } func TestArraysTreeRoot_ZeroLength(t *testing.T) { - 
_, err := nocachedHasher.arraysRoot([][]byte{}, 0, "testing") + _, err := NocachedHasher.arraysRoot([][]byte{}, 0, "testing") assert.ErrorContains(t, "zero leaves provided", err) } diff --git a/beacon-chain/state/v3/field_root_validator.go b/beacon-chain/state/stateutil/field_root_validator.go similarity index 88% rename from beacon-chain/state/v3/field_root_validator.go rename to beacon-chain/state/stateutil/field_root_validator.go index 2f5c96a1b2..40e25dec34 100644 --- a/beacon-chain/state/v3/field_root_validator.go +++ b/beacon-chain/state/stateutil/field_root_validator.go @@ -1,11 +1,10 @@ -package v3 +package stateutil import ( "bytes" "encoding/binary" "github.com/pkg/errors" - "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil" "github.com/prysmaticlabs/prysm/config/features" "github.com/prysmaticlabs/prysm/config/params" "github.com/prysmaticlabs/prysm/crypto/hash" @@ -13,6 +12,16 @@ import ( ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" ) +// ValidatorRegistryRoot computes the HashTreeRoot Merkleization of +// a list of validator structs according to the Ethereum +// Simple Serialize specification. 
+func ValidatorRegistryRoot(vals []*ethpb.Validator) ([32]byte, error) { + if features.Get().EnableSSZCache { + return CachedHasher.validatorRegistryRoot(vals) + } + return NocachedHasher.validatorRegistryRoot(vals) +} + func (h *stateRootHasher) validatorRegistryRoot(validators []*ethpb.Validator) ([32]byte, error) { hashKeyElements := make([]byte, len(validators)*32) roots := make([][32]byte, len(validators)) @@ -59,7 +68,7 @@ func (h *stateRootHasher) validatorRoot(hasher ssz.HashFn, validator *ethpb.Vali return [32]byte{}, errors.New("nil validator") } - enc := stateutil.ValidatorEncKey(validator) + enc := validatorEncKey(validator) // Check if it exists in cache: if h.rootsCache != nil { if found, ok := h.rootsCache.Get(string(enc)); found != nil && ok { @@ -67,7 +76,7 @@ func (h *stateRootHasher) validatorRoot(hasher ssz.HashFn, validator *ethpb.Vali } } - valRoot, err := stateutil.ValidatorRootWithHasher(hasher, validator) + valRoot, err := ValidatorRootWithHasher(hasher, validator) if err != nil { return [32]byte{}, err } @@ -77,13 +86,3 @@ func (h *stateRootHasher) validatorRoot(hasher ssz.HashFn, validator *ethpb.Vali } return valRoot, nil } - -// ValidatorRegistryRoot computes the HashTreeRoot Merkleization of -// a list of validator structs according to the eth2 -// Simple Serialize specification. 
-func ValidatorRegistryRoot(vals []*ethpb.Validator) ([32]byte, error) { - if features.Get().EnableSSZCache { - return cachedHasher.validatorRegistryRoot(vals) - } - return nocachedHasher.validatorRegistryRoot(vals) -} diff --git a/beacon-chain/state/v3/field_root_vector.go b/beacon-chain/state/stateutil/field_root_vector.go similarity index 89% rename from beacon-chain/state/v3/field_root_vector.go rename to beacon-chain/state/stateutil/field_root_vector.go index 7654d45e3d..1e9df49d2c 100644 --- a/beacon-chain/state/v3/field_root_vector.go +++ b/beacon-chain/state/stateutil/field_root_vector.go @@ -1,8 +1,7 @@ -package v3 +package stateutil import ( "github.com/pkg/errors" - "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil" "github.com/prysmaticlabs/prysm/crypto/hash" "github.com/prysmaticlabs/prysm/encoding/ssz" ) @@ -26,7 +25,18 @@ func (h *stateRootHasher) arraysRoot(input [][]byte, length uint64, fieldName st if len(prevLeaves) == 0 || h.rootsCache == nil { prevLeaves = leaves } - + // Exit early if our previous leaves length don't match with the current set. + // This should never happen but better to be defensive here. + if len(prevLeaves) != len(leaves) { + res, err := h.merkleizeWithCache(leaves, length, fieldName, hashFunc) + if err != nil { + return [32]byte{}, err + } + if h.rootsCache != nil { + leavesCache[fieldName] = leaves + } + return res, nil + } for i := 0; i < len(leaves); i++ { // We check if any items changed since the roots were last recomputed. 
notEqual := leaves[i] != prevLeaves[i] @@ -134,7 +144,7 @@ func (h *stateRootHasher) merkleizeWithCache(leaves [][32]byte, length uint64, } layers[0] = hashLayer var err error - layers, hashLayer, err = stateutil.MerkleizeTrieLeaves(layers, hashLayer, hasher) + layers, hashLayer, err = MerkleizeTrieLeaves(layers, hashLayer, hasher) if err != nil { return [32]byte{}, err } diff --git a/beacon-chain/state/stateutil/pending_attestation_root.go b/beacon-chain/state/stateutil/pending_attestation_root.go index 744f09f2fc..8503be51fc 100644 --- a/beacon-chain/state/stateutil/pending_attestation_root.go +++ b/beacon-chain/state/stateutil/pending_attestation_root.go @@ -40,9 +40,9 @@ func PendingAttRootWithHasher(hasher ssz.HashFn, att *ethpb.PendingAttestation) return ssz.BitwiseMerkleizeArrays(hasher, fieldRoots, uint64(len(fieldRoots)), uint64(len(fieldRoots))) } -// PendingAttEncKey returns the encoded key in bytes of input `pendingAttestation`, +// pendingAttEncKey returns the encoded key in bytes of input `pendingAttestation`, // the returned key bytes can be used for caching purposes. 
-func PendingAttEncKey(att *ethpb.PendingAttestation) []byte { +func pendingAttEncKey(att *ethpb.PendingAttestation) []byte { enc := make([]byte, 2192) if att != nil { diff --git a/beacon-chain/state/stateutil/state_hasher.go b/beacon-chain/state/stateutil/state_hasher.go new file mode 100644 index 0000000000..c85c8a0e97 --- /dev/null +++ b/beacon-chain/state/stateutil/state_hasher.go @@ -0,0 +1,547 @@ +package stateutil + +import ( + "context" + "encoding/binary" + "sync" + + "github.com/dgraph-io/ristretto" + "github.com/pkg/errors" + "github.com/prysmaticlabs/prysm/config/params" + "github.com/prysmaticlabs/prysm/crypto/hash" + "github.com/prysmaticlabs/prysm/encoding/bytesutil" + "github.com/prysmaticlabs/prysm/encoding/ssz" + ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" + "go.opencensus.io/trace" +) + +var ( + // Set the map size as equal to that of the latest state field count. + leavesCache = make(map[string][][32]byte, params.BeaconConfig().BeaconStateMergeFieldCount) + layersCache = make(map[string][][][32]byte, params.BeaconConfig().BeaconStateMergeFieldCount) + lock sync.RWMutex +) + +const cacheSize = 100000 + +// NocachedHasher references a hasher that will not utilize a cache. +var NocachedHasher *stateRootHasher + +// CachedHasher references a hasher that will utilize a roots cache. +var CachedHasher *stateRootHasher + +func init() { + rootsCache, err := ristretto.NewCache(&ristretto.Config{ + NumCounters: cacheSize, // number of keys to track frequency of (1M). + MaxCost: 1 << 22, // maximum cost of cache (3MB). + // 100,000 roots will take up approximately 3 MB in memory. + BufferItems: 64, // number of keys per Get buffer. + }) + if err != nil { + panic(err) + } + // Temporarily disable roots cache until cache issues can be resolved. 
+ CachedHasher = &stateRootHasher{rootsCache: rootsCache} + NocachedHasher = &stateRootHasher{} +} + +// stateRootHasher defines an object through which we can +// hash the different fields in the state with a few cached layers. +type stateRootHasher struct { + rootsCache *ristretto.Cache +} + +// ComputeFieldRootsWithHasherPhase0 hashes the provided phase 0 state and returns its respective field roots. +func (h *stateRootHasher) ComputeFieldRootsWithHasherPhase0(ctx context.Context, state *ethpb.BeaconState) ([][]byte, error) { + _, span := trace.StartSpan(ctx, "hasher.ComputeFieldRootsWithHasherPhase0") + defer span.End() + + if state == nil { + return nil, errors.New("nil state") + } + hasher := hash.CustomSHA256Hasher() + fieldRoots := make([][]byte, params.BeaconConfig().BeaconStateFieldCount) + + // Genesis time root. + genesisRoot := ssz.Uint64Root(state.GenesisTime) + fieldRoots[0] = genesisRoot[:] + + // Genesis validator root. + r := [32]byte{} + copy(r[:], state.GenesisValidatorsRoot) + fieldRoots[1] = r[:] + + // Slot root. + slotRoot := ssz.Uint64Root(uint64(state.Slot)) + fieldRoots[2] = slotRoot[:] + + // Fork data structure root. + forkHashTreeRoot, err := ssz.ForkRoot(state.Fork) + if err != nil { + return nil, errors.Wrap(err, "could not compute fork merkleization") + } + fieldRoots[3] = forkHashTreeRoot[:] + + // BeaconBlockHeader data structure root. + headerHashTreeRoot, err := BlockHeaderRoot(state.LatestBlockHeader) + if err != nil { + return nil, errors.Wrap(err, "could not compute block header merkleization") + } + fieldRoots[4] = headerHashTreeRoot[:] + + // BlockRoots array root. + blockRootsRoot, err := h.arraysRoot(state.BlockRoots, uint64(params.BeaconConfig().SlotsPerHistoricalRoot), "BlockRoots") + if err != nil { + return nil, errors.Wrap(err, "could not compute block roots merkleization") + } + fieldRoots[5] = blockRootsRoot[:] + + // StateRoots array root. 
+ stateRootsRoot, err := h.arraysRoot(state.StateRoots, uint64(params.BeaconConfig().SlotsPerHistoricalRoot), "StateRoots") + if err != nil { + return nil, errors.Wrap(err, "could not compute state roots merkleization") + } + fieldRoots[6] = stateRootsRoot[:] + + // HistoricalRoots slice root. + historicalRootsRt, err := ssz.ByteArrayRootWithLimit(state.HistoricalRoots, params.BeaconConfig().HistoricalRootsLimit) + if err != nil { + return nil, errors.Wrap(err, "could not compute historical roots merkleization") + } + fieldRoots[7] = historicalRootsRt[:] + + // Eth1Data data structure root. + eth1HashTreeRoot, err := Eth1Root(hasher, state.Eth1Data) + if err != nil { + return nil, errors.Wrap(err, "could not compute eth1data merkleization") + } + fieldRoots[8] = eth1HashTreeRoot[:] + + // Eth1DataVotes slice root. + eth1VotesRoot, err := eth1DataVotesRoot(state.Eth1DataVotes) + if err != nil { + return nil, errors.Wrap(err, "could not compute eth1data votes merkleization") + } + fieldRoots[9] = eth1VotesRoot[:] + + // Eth1DepositIndex root. + eth1DepositIndexBuf := make([]byte, 8) + binary.LittleEndian.PutUint64(eth1DepositIndexBuf, state.Eth1DepositIndex) + eth1DepositBuf := bytesutil.ToBytes32(eth1DepositIndexBuf) + fieldRoots[10] = eth1DepositBuf[:] + + // Validators slice root. + validatorsRoot, err := h.validatorRegistryRoot(state.Validators) + if err != nil { + return nil, errors.Wrap(err, "could not compute validator registry merkleization") + } + fieldRoots[11] = validatorsRoot[:] + + // Balances slice root. + balancesRoot, err := Uint64ListRootWithRegistryLimit(state.Balances) + if err != nil { + return nil, errors.Wrap(err, "could not compute validator balances merkleization") + } + fieldRoots[12] = balancesRoot[:] + + // RandaoMixes array root. 
+ randaoRootsRoot, err := h.arraysRoot(state.RandaoMixes, uint64(params.BeaconConfig().EpochsPerHistoricalVector), "RandaoMixes") + if err != nil { + return nil, errors.Wrap(err, "could not compute randao roots merkleization") + } + fieldRoots[13] = randaoRootsRoot[:] + + // Slashings array root. + slashingsRootsRoot, err := ssz.SlashingsRoot(state.Slashings) + if err != nil { + return nil, errors.Wrap(err, "could not compute slashings merkleization") + } + fieldRoots[14] = slashingsRootsRoot[:] + + // PreviousEpochAttestations slice root. + prevAttsRoot, err := h.epochAttestationsRoot(state.PreviousEpochAttestations) + if err != nil { + return nil, errors.Wrap(err, "could not compute previous epoch attestations merkleization") + } + fieldRoots[15] = prevAttsRoot[:] + + // CurrentEpochAttestations slice root. + currAttsRoot, err := h.epochAttestationsRoot(state.CurrentEpochAttestations) + if err != nil { + return nil, errors.Wrap(err, "could not compute current epoch attestations merkleization") + } + fieldRoots[16] = currAttsRoot[:] + + // JustificationBits root. + justifiedBitsRoot := bytesutil.ToBytes32(state.JustificationBits) + fieldRoots[17] = justifiedBitsRoot[:] + + // PreviousJustifiedCheckpoint data structure root. + prevCheckRoot, err := ssz.CheckpointRoot(hasher, state.PreviousJustifiedCheckpoint) + if err != nil { + return nil, errors.Wrap(err, "could not compute previous justified checkpoint merkleization") + } + fieldRoots[18] = prevCheckRoot[:] + + // CurrentJustifiedCheckpoint data structure root. + currJustRoot, err := ssz.CheckpointRoot(hasher, state.CurrentJustifiedCheckpoint) + if err != nil { + return nil, errors.Wrap(err, "could not compute current justified checkpoint merkleization") + } + fieldRoots[19] = currJustRoot[:] + + // FinalizedCheckpoint data structure root. 
+ finalRoot, err := ssz.CheckpointRoot(hasher, state.FinalizedCheckpoint) + if err != nil { + return nil, errors.Wrap(err, "could not compute finalized checkpoint merkleization") + } + fieldRoots[20] = finalRoot[:] + return fieldRoots, nil +} + +// ComputeFieldRootsWithHasherAltair hashes the provided altair state and returns its respective field roots. +func (h *stateRootHasher) ComputeFieldRootsWithHasherAltair(ctx context.Context, state *ethpb.BeaconStateAltair) ([][]byte, error) { + _, span := trace.StartSpan(ctx, "hasher.ComputeFieldRootsWithHasherAltair") + defer span.End() + + if state == nil { + return nil, errors.New("nil state") + } + hasher := hash.CustomSHA256Hasher() + fieldRoots := make([][]byte, params.BeaconConfig().BeaconStateAltairFieldCount) + + // Genesis time root. + genesisRoot := ssz.Uint64Root(state.GenesisTime) + fieldRoots[0] = genesisRoot[:] + + // Genesis validator root. + r := [32]byte{} + copy(r[:], state.GenesisValidatorsRoot) + fieldRoots[1] = r[:] + + // Slot root. + slotRoot := ssz.Uint64Root(uint64(state.Slot)) + fieldRoots[2] = slotRoot[:] + + // Fork data structure root. + forkHashTreeRoot, err := ssz.ForkRoot(state.Fork) + if err != nil { + return nil, errors.Wrap(err, "could not compute fork merkleization") + } + fieldRoots[3] = forkHashTreeRoot[:] + + // BeaconBlockHeader data structure root. + headerHashTreeRoot, err := BlockHeaderRoot(state.LatestBlockHeader) + if err != nil { + return nil, errors.Wrap(err, "could not compute block header merkleization") + } + fieldRoots[4] = headerHashTreeRoot[:] + + // BlockRoots array root. + blockRootsRoot, err := h.arraysRoot(state.BlockRoots, uint64(params.BeaconConfig().SlotsPerHistoricalRoot), "BlockRoots") + if err != nil { + return nil, errors.Wrap(err, "could not compute block roots merkleization") + } + fieldRoots[5] = blockRootsRoot[:] + + // StateRoots array root. 
+ stateRootsRoot, err := h.arraysRoot(state.StateRoots, uint64(params.BeaconConfig().SlotsPerHistoricalRoot), "StateRoots") + if err != nil { + return nil, errors.Wrap(err, "could not compute state roots merkleization") + } + fieldRoots[6] = stateRootsRoot[:] + + // HistoricalRoots slice root. + historicalRootsRt, err := ssz.ByteArrayRootWithLimit(state.HistoricalRoots, params.BeaconConfig().HistoricalRootsLimit) + if err != nil { + return nil, errors.Wrap(err, "could not compute historical roots merkleization") + } + fieldRoots[7] = historicalRootsRt[:] + + // Eth1Data data structure root. + eth1HashTreeRoot, err := Eth1Root(hasher, state.Eth1Data) + if err != nil { + return nil, errors.Wrap(err, "could not compute eth1data merkleization") + } + fieldRoots[8] = eth1HashTreeRoot[:] + + // Eth1DataVotes slice root. + eth1VotesRoot, err := eth1DataVotesRoot(state.Eth1DataVotes) + if err != nil { + return nil, errors.Wrap(err, "could not compute eth1data votes merkleization") + } + fieldRoots[9] = eth1VotesRoot[:] + + // Eth1DepositIndex root. + eth1DepositIndexBuf := make([]byte, 8) + binary.LittleEndian.PutUint64(eth1DepositIndexBuf, state.Eth1DepositIndex) + eth1DepositBuf := bytesutil.ToBytes32(eth1DepositIndexBuf) + fieldRoots[10] = eth1DepositBuf[:] + + // Validators slice root. + validatorsRoot, err := h.validatorRegistryRoot(state.Validators) + if err != nil { + return nil, errors.Wrap(err, "could not compute validator registry merkleization") + } + fieldRoots[11] = validatorsRoot[:] + + // Balances slice root. + balancesRoot, err := Uint64ListRootWithRegistryLimit(state.Balances) + if err != nil { + return nil, errors.Wrap(err, "could not compute validator balances merkleization") + } + fieldRoots[12] = balancesRoot[:] + + // RandaoMixes array root. 
+ randaoRootsRoot, err := h.arraysRoot(state.RandaoMixes, uint64(params.BeaconConfig().EpochsPerHistoricalVector), "RandaoMixes") + if err != nil { + return nil, errors.Wrap(err, "could not compute randao roots merkleization") + } + fieldRoots[13] = randaoRootsRoot[:] + + // Slashings array root. + slashingsRootsRoot, err := ssz.SlashingsRoot(state.Slashings) + if err != nil { + return nil, errors.Wrap(err, "could not compute slashings merkleization") + } + fieldRoots[14] = slashingsRootsRoot[:] + + // PreviousEpochParticipation slice root. + prevParticipationRoot, err := ParticipationBitsRoot(state.PreviousEpochParticipation) + if err != nil { + return nil, errors.Wrap(err, "could not compute previous epoch participation merkleization") + } + fieldRoots[15] = prevParticipationRoot[:] + + // CurrentEpochParticipation slice root. + currParticipationRoot, err := ParticipationBitsRoot(state.CurrentEpochParticipation) + if err != nil { + return nil, errors.Wrap(err, "could not compute current epoch participation merkleization") + } + fieldRoots[16] = currParticipationRoot[:] + + // JustificationBits root. + justifiedBitsRoot := bytesutil.ToBytes32(state.JustificationBits) + fieldRoots[17] = justifiedBitsRoot[:] + + // PreviousJustifiedCheckpoint data structure root. + prevCheckRoot, err := ssz.CheckpointRoot(hasher, state.PreviousJustifiedCheckpoint) + if err != nil { + return nil, errors.Wrap(err, "could not compute previous justified checkpoint merkleization") + } + fieldRoots[18] = prevCheckRoot[:] + + // CurrentJustifiedCheckpoint data structure root. + currJustRoot, err := ssz.CheckpointRoot(hasher, state.CurrentJustifiedCheckpoint) + if err != nil { + return nil, errors.Wrap(err, "could not compute current justified checkpoint merkleization") + } + fieldRoots[19] = currJustRoot[:] + + // FinalizedCheckpoint data structure root. 
+ finalRoot, err := ssz.CheckpointRoot(hasher, state.FinalizedCheckpoint) + if err != nil { + return nil, errors.Wrap(err, "could not compute finalized checkpoint merkleization") + } + fieldRoots[20] = finalRoot[:] + + // Inactivity scores root. + inactivityScoresRoot, err := Uint64ListRootWithRegistryLimit(state.InactivityScores) + if err != nil { + return nil, errors.Wrap(err, "could not compute inactivityScoreRoot") + } + fieldRoots[21] = inactivityScoresRoot[:] + + // Current sync committee root. + currentSyncCommitteeRoot, err := SyncCommitteeRoot(state.CurrentSyncCommittee) + if err != nil { + return nil, errors.Wrap(err, "could not compute sync committee merkleization") + } + fieldRoots[22] = currentSyncCommitteeRoot[:] + + // Next sync committee root. + nextSyncCommitteeRoot, err := SyncCommitteeRoot(state.NextSyncCommittee) + if err != nil { + return nil, errors.Wrap(err, "could not compute sync committee merkleization") + } + fieldRoots[23] = nextSyncCommitteeRoot[:] + + return fieldRoots, nil +} + +// ComputeFieldRootsWithHasherMerge hashes the provided merge state and returns its respective field roots. +func (h *stateRootHasher) ComputeFieldRootsWithHasherMerge(ctx context.Context, state *ethpb.BeaconStateMerge) ([][]byte, error) { + _, span := trace.StartSpan(ctx, "hasher.ComputeFieldRootsWithHasherMerge") + defer span.End() + + if state == nil { + return nil, errors.New("nil state") + } + hasher := hash.CustomSHA256Hasher() + fieldRoots := make([][]byte, params.BeaconConfig().BeaconStateMergeFieldCount) + + // Genesis time root. + genesisRoot := ssz.Uint64Root(state.GenesisTime) + fieldRoots[0] = genesisRoot[:] + + // Genesis validator root. + r := [32]byte{} + copy(r[:], state.GenesisValidatorsRoot) + fieldRoots[1] = r[:] + + // Slot root. + slotRoot := ssz.Uint64Root(uint64(state.Slot)) + fieldRoots[2] = slotRoot[:] + + // Fork data structure root. 
+ forkHashTreeRoot, err := ssz.ForkRoot(state.Fork) + if err != nil { + return nil, errors.Wrap(err, "could not compute fork merkleization") + } + fieldRoots[3] = forkHashTreeRoot[:] + + // BeaconBlockHeader data structure root. + headerHashTreeRoot, err := BlockHeaderRoot(state.LatestBlockHeader) + if err != nil { + return nil, errors.Wrap(err, "could not compute block header merkleization") + } + fieldRoots[4] = headerHashTreeRoot[:] + + // BlockRoots array root. + blockRootsRoot, err := h.arraysRoot(state.BlockRoots, uint64(params.BeaconConfig().SlotsPerHistoricalRoot), "BlockRoots") + if err != nil { + return nil, errors.Wrap(err, "could not compute block roots merkleization") + } + fieldRoots[5] = blockRootsRoot[:] + + // StateRoots array root. + stateRootsRoot, err := h.arraysRoot(state.StateRoots, uint64(params.BeaconConfig().SlotsPerHistoricalRoot), "StateRoots") + if err != nil { + return nil, errors.Wrap(err, "could not compute state roots merkleization") + } + fieldRoots[6] = stateRootsRoot[:] + + // HistoricalRoots slice root. + historicalRootsRt, err := ssz.ByteArrayRootWithLimit(state.HistoricalRoots, params.BeaconConfig().HistoricalRootsLimit) + if err != nil { + return nil, errors.Wrap(err, "could not compute historical roots merkleization") + } + fieldRoots[7] = historicalRootsRt[:] + + // Eth1Data data structure root. + eth1HashTreeRoot, err := Eth1Root(hasher, state.Eth1Data) + if err != nil { + return nil, errors.Wrap(err, "could not compute eth1data merkleization") + } + fieldRoots[8] = eth1HashTreeRoot[:] + + // Eth1DataVotes slice root. + eth1VotesRoot, err := eth1DataVotesRoot(state.Eth1DataVotes) + if err != nil { + return nil, errors.Wrap(err, "could not compute eth1data votes merkleization") + } + fieldRoots[9] = eth1VotesRoot[:] + + // Eth1DepositIndex root. 
+ eth1DepositIndexBuf := make([]byte, 8) + binary.LittleEndian.PutUint64(eth1DepositIndexBuf, state.Eth1DepositIndex) + eth1DepositBuf := bytesutil.ToBytes32(eth1DepositIndexBuf) + fieldRoots[10] = eth1DepositBuf[:] + + // Validators slice root. + validatorsRoot, err := h.validatorRegistryRoot(state.Validators) + if err != nil { + return nil, errors.Wrap(err, "could not compute validator registry merkleization") + } + fieldRoots[11] = validatorsRoot[:] + + // Balances slice root. + balancesRoot, err := Uint64ListRootWithRegistryLimit(state.Balances) + if err != nil { + return nil, errors.Wrap(err, "could not compute validator balances merkleization") + } + fieldRoots[12] = balancesRoot[:] + + // RandaoMixes array root. + randaoRootsRoot, err := h.arraysRoot(state.RandaoMixes, uint64(params.BeaconConfig().EpochsPerHistoricalVector), "RandaoMixes") + if err != nil { + return nil, errors.Wrap(err, "could not compute randao roots merkleization") + } + fieldRoots[13] = randaoRootsRoot[:] + + // Slashings array root. + slashingsRootsRoot, err := ssz.SlashingsRoot(state.Slashings) + if err != nil { + return nil, errors.Wrap(err, "could not compute slashings merkleization") + } + fieldRoots[14] = slashingsRootsRoot[:] + + // PreviousEpochParticipation slice root. + prevParticipationRoot, err := ParticipationBitsRoot(state.PreviousEpochParticipation) + if err != nil { + return nil, errors.Wrap(err, "could not compute previous epoch participation merkleization") + } + fieldRoots[15] = prevParticipationRoot[:] + + // CurrentEpochParticipation slice root. + currParticipationRoot, err := ParticipationBitsRoot(state.CurrentEpochParticipation) + if err != nil { + return nil, errors.Wrap(err, "could not compute current epoch participation merkleization") + } + fieldRoots[16] = currParticipationRoot[:] + + // JustificationBits root. 
+ justifiedBitsRoot := bytesutil.ToBytes32(state.JustificationBits) + fieldRoots[17] = justifiedBitsRoot[:] + + // PreviousJustifiedCheckpoint data structure root. + prevCheckRoot, err := ssz.CheckpointRoot(hasher, state.PreviousJustifiedCheckpoint) + if err != nil { + return nil, errors.Wrap(err, "could not compute previous justified checkpoint merkleization") + } + fieldRoots[18] = prevCheckRoot[:] + + // CurrentJustifiedCheckpoint data structure root. + currJustRoot, err := ssz.CheckpointRoot(hasher, state.CurrentJustifiedCheckpoint) + if err != nil { + return nil, errors.Wrap(err, "could not compute current justified checkpoint merkleization") + } + fieldRoots[19] = currJustRoot[:] + + // FinalizedCheckpoint data structure root. + finalRoot, err := ssz.CheckpointRoot(hasher, state.FinalizedCheckpoint) + if err != nil { + return nil, errors.Wrap(err, "could not compute finalized checkpoint merkleization") + } + fieldRoots[20] = finalRoot[:] + + // Inactivity scores root. + inactivityScoresRoot, err := Uint64ListRootWithRegistryLimit(state.InactivityScores) + if err != nil { + return nil, errors.Wrap(err, "could not compute inactivityScoreRoot") + } + fieldRoots[21] = inactivityScoresRoot[:] + + // Current sync committee root. + currentSyncCommitteeRoot, err := SyncCommitteeRoot(state.CurrentSyncCommittee) + if err != nil { + return nil, errors.Wrap(err, "could not compute sync committee merkleization") + } + fieldRoots[22] = currentSyncCommitteeRoot[:] + + // Next sync committee root. + nextSyncCommitteeRoot, err := SyncCommitteeRoot(state.NextSyncCommittee) + if err != nil { + return nil, errors.Wrap(err, "could not compute sync committee merkleization") + } + fieldRoots[23] = nextSyncCommitteeRoot[:] + + // Execution payload root. 
+ executionPayloadRoot, err := state.LatestExecutionPayloadHeader.HashTreeRoot() + if err != nil { + return nil, err + } + fieldRoots[24] = executionPayloadRoot[:] + + return fieldRoots, nil +} diff --git a/beacon-chain/state/stateutil/trie_helpers_test.go b/beacon-chain/state/stateutil/trie_helpers_test.go index 4e32923a07..13481af9f1 100644 --- a/beacon-chain/state/stateutil/trie_helpers_test.go +++ b/beacon-chain/state/stateutil/trie_helpers_test.go @@ -5,7 +5,6 @@ import ( types "github.com/prysmaticlabs/eth2-types" "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil" - v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1" "github.com/prysmaticlabs/prysm/config/params" "github.com/prysmaticlabs/prysm/crypto/hash" "github.com/prysmaticlabs/prysm/encoding/bytesutil" @@ -17,7 +16,7 @@ import ( func TestReturnTrieLayer_OK(t *testing.T) { newState, _ := util.DeterministicGenesisState(t, 32) - root, err := v1.RootsArrayHashTreeRoot(newState.BlockRoots(), uint64(params.BeaconConfig().SlotsPerHistoricalRoot), "BlockRoots") + root, err := stateutil.RootsArrayHashTreeRoot(newState.BlockRoots(), uint64(params.BeaconConfig().SlotsPerHistoricalRoot), "BlockRoots") require.NoError(t, err) blockRts := newState.BlockRoots() roots := make([][32]byte, 0, len(blockRts)) @@ -32,7 +31,7 @@ func TestReturnTrieLayer_OK(t *testing.T) { func TestReturnTrieLayerVariable_OK(t *testing.T) { newState, _ := util.DeterministicGenesisState(t, 32) - root, err := v1.ValidatorRegistryRoot(newState.Validators()) + root, err := stateutil.ValidatorRegistryRoot(newState.Validators()) require.NoError(t, err) hasher := hash.CustomSHA256Hasher() validators := newState.Validators() @@ -64,7 +63,7 @@ func TestRecomputeFromLayer_FixedSizedArray(t *testing.T) { require.NoError(t, newState.UpdateBlockRootAtIndex(changedIdx[0], changedRoots[0])) require.NoError(t, newState.UpdateBlockRootAtIndex(changedIdx[1], changedRoots[1])) - expectedRoot, err := 
v1.RootsArrayHashTreeRoot(newState.BlockRoots(), uint64(params.BeaconConfig().SlotsPerHistoricalRoot), "BlockRoots") + expectedRoot, err := stateutil.RootsArrayHashTreeRoot(newState.BlockRoots(), uint64(params.BeaconConfig().SlotsPerHistoricalRoot), "BlockRoots") require.NoError(t, err) root, _, err := stateutil.RecomputeFromLayer(changedRoots, changedIdx, layers) require.NoError(t, err) @@ -98,7 +97,7 @@ func TestRecomputeFromLayer_VariableSizedArray(t *testing.T) { require.NoError(t, newState.UpdateValidatorAtIndex(types.ValidatorIndex(changedIdx[0]), changedVals[0])) require.NoError(t, newState.UpdateValidatorAtIndex(types.ValidatorIndex(changedIdx[1]), changedVals[1])) - expectedRoot, err := v1.ValidatorRegistryRoot(newState.Validators()) + expectedRoot, err := stateutil.ValidatorRegistryRoot(newState.Validators()) require.NoError(t, err) roots = make([][32]byte, 0, len(changedVals)) for _, val := range changedVals { diff --git a/beacon-chain/state/stateutil/validator_root.go b/beacon-chain/state/stateutil/validator_root.go index e41fc0d6af..8c88d4a148 100644 --- a/beacon-chain/state/stateutil/validator_root.go +++ b/beacon-chain/state/stateutil/validator_root.go @@ -88,9 +88,9 @@ func Uint64ListRootWithRegistryLimit(balances []uint64) ([32]byte, error) { return ssz.MixInLength(balancesRootsRoot, balancesLengthRoot), nil } -// ValidatorEncKey returns the encoded key in bytes of input `validator`, +// validatorEncKey returns the encoded key in bytes of input `validator`, // the returned key bytes can be used for caching purposes. 
-func ValidatorEncKey(validator *ethpb.Validator) []byte { +func validatorEncKey(validator *ethpb.Validator) []byte { if validator == nil { return nil } diff --git a/beacon-chain/state/v1/BUILD.bazel b/beacon-chain/state/v1/BUILD.bazel index ca1220a9c5..54dfd8bc45 100644 --- a/beacon-chain/state/v1/BUILD.bazel +++ b/beacon-chain/state/v1/BUILD.bazel @@ -56,7 +56,6 @@ go_library( "//encoding/ssz:go_default_library", "//proto/prysm/v1alpha1:go_default_library", "//runtime/version:go_default_library", - "@com_github_dgraph_io_ristretto//:go_default_library", "@com_github_pkg_errors//:go_default_library", "@com_github_prometheus_client_golang//prometheus:go_default_library", "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library", diff --git a/beacon-chain/state/v1/field_roots.go b/beacon-chain/state/v1/field_roots.go index 4c154e75e3..0b9983b52c 100644 --- a/beacon-chain/state/v1/field_roots.go +++ b/beacon-chain/state/v1/field_roots.go @@ -2,203 +2,17 @@ package v1 import ( "context" - "encoding/binary" - "sync" - "github.com/dgraph-io/ristretto" - "github.com/pkg/errors" "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil" "github.com/prysmaticlabs/prysm/config/features" - "github.com/prysmaticlabs/prysm/config/params" - "github.com/prysmaticlabs/prysm/crypto/hash" - "github.com/prysmaticlabs/prysm/encoding/bytesutil" - "github.com/prysmaticlabs/prysm/encoding/ssz" ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" - "go.opencensus.io/trace" ) -var ( - leavesCache = make(map[string][][32]byte, params.BeaconConfig().BeaconStateFieldCount) - layersCache = make(map[string][][][32]byte, params.BeaconConfig().BeaconStateFieldCount) - lock sync.RWMutex -) - -const cacheSize = 100000 - -var nocachedHasher *stateRootHasher -var cachedHasher *stateRootHasher - -func init() { - rootsCache, err := ristretto.NewCache(&ristretto.Config{ - NumCounters: cacheSize, // number of keys to track frequency of (1M). 
- MaxCost: 1 << 22, // maximum cost of cache (3MB). - // 100,000 roots will take up approximately 3 MB in memory. - BufferItems: 64, // number of keys per Get buffer. - }) - if err != nil { - panic(err) - } - // Temporarily disable roots cache until cache issues can be resolved. - cachedHasher = &stateRootHasher{rootsCache: rootsCache} - nocachedHasher = &stateRootHasher{} -} - -type stateRootHasher struct { - rootsCache *ristretto.Cache -} - // computeFieldRoots returns the hash tree root computations of every field in // the beacon state as a list of 32 byte roots. func computeFieldRoots(ctx context.Context, state *ethpb.BeaconState) ([][]byte, error) { if features.Get().EnableSSZCache { - return cachedHasher.computeFieldRootsWithHasher(ctx, state) + return stateutil.CachedHasher.ComputeFieldRootsWithHasherPhase0(ctx, state) } - return nocachedHasher.computeFieldRootsWithHasher(ctx, state) -} - -func (h *stateRootHasher) computeFieldRootsWithHasher(ctx context.Context, state *ethpb.BeaconState) ([][]byte, error) { - ctx, span := trace.StartSpan(ctx, "beaconState.computeFieldRootsWithHasher") - defer span.End() - - if state == nil { - return nil, errors.New("nil state") - } - hasher := hash.CustomSHA256Hasher() - fieldRoots := make([][]byte, params.BeaconConfig().BeaconStateFieldCount) - - // Genesis time root. - genesisRoot := ssz.Uint64Root(state.GenesisTime) - fieldRoots[0] = genesisRoot[:] - - // Genesis validator root. - r := [32]byte{} - copy(r[:], state.GenesisValidatorsRoot) - fieldRoots[1] = r[:] - - // Slot root. - slotRoot := ssz.Uint64Root(uint64(state.Slot)) - fieldRoots[2] = slotRoot[:] - - // Fork data structure root. - forkHashTreeRoot, err := ssz.ForkRoot(state.Fork) - if err != nil { - return nil, errors.Wrap(err, "could not compute fork merkleization") - } - fieldRoots[3] = forkHashTreeRoot[:] - - // BeaconBlockHeader data structure root. 
- headerHashTreeRoot, err := stateutil.BlockHeaderRoot(state.LatestBlockHeader) - if err != nil { - return nil, errors.Wrap(err, "could not compute block header merkleization") - } - fieldRoots[4] = headerHashTreeRoot[:] - - // BlockRoots array root. - blockRootsRoot, err := h.arraysRoot(state.BlockRoots, uint64(params.BeaconConfig().SlotsPerHistoricalRoot), "BlockRoots") - if err != nil { - return nil, errors.Wrap(err, "could not compute block roots merkleization") - } - fieldRoots[5] = blockRootsRoot[:] - - // StateRoots array root. - stateRootsRoot, err := h.arraysRoot(state.StateRoots, uint64(params.BeaconConfig().SlotsPerHistoricalRoot), "StateRoots") - if err != nil { - return nil, errors.Wrap(err, "could not compute state roots merkleization") - } - fieldRoots[6] = stateRootsRoot[:] - - // HistoricalRoots slice root. - historicalRootsRt, err := ssz.ByteArrayRootWithLimit(state.HistoricalRoots, params.BeaconConfig().HistoricalRootsLimit) - if err != nil { - return nil, errors.Wrap(err, "could not compute historical roots merkleization") - } - fieldRoots[7] = historicalRootsRt[:] - - // Eth1Data data structure root. - eth1HashTreeRoot, err := eth1Root(hasher, state.Eth1Data) - if err != nil { - return nil, errors.Wrap(err, "could not compute eth1data merkleization") - } - fieldRoots[8] = eth1HashTreeRoot[:] - - // Eth1DataVotes slice root. - eth1VotesRoot, err := eth1DataVotesRoot(state.Eth1DataVotes) - if err != nil { - return nil, errors.Wrap(err, "could not compute eth1data votes merkleization") - } - fieldRoots[9] = eth1VotesRoot[:] - - // Eth1DepositIndex root. - eth1DepositIndexBuf := make([]byte, 8) - binary.LittleEndian.PutUint64(eth1DepositIndexBuf, state.Eth1DepositIndex) - eth1DepositBuf := bytesutil.ToBytes32(eth1DepositIndexBuf) - fieldRoots[10] = eth1DepositBuf[:] - - // Validators slice root. 
- validatorsRoot, err := h.validatorRegistryRoot(state.Validators) - if err != nil { - return nil, errors.Wrap(err, "could not compute validator registry merkleization") - } - fieldRoots[11] = validatorsRoot[:] - - // Balances slice root. - balancesRoot, err := stateutil.Uint64ListRootWithRegistryLimit(state.Balances) - if err != nil { - return nil, errors.Wrap(err, "could not compute validator balances merkleization") - } - fieldRoots[12] = balancesRoot[:] - - // RandaoMixes array root. - randaoRootsRoot, err := h.arraysRoot(state.RandaoMixes, uint64(params.BeaconConfig().EpochsPerHistoricalVector), "RandaoMixes") - if err != nil { - return nil, errors.Wrap(err, "could not compute randao roots merkleization") - } - fieldRoots[13] = randaoRootsRoot[:] - - // Slashings array root. - slashingsRootsRoot, err := ssz.SlashingsRoot(state.Slashings) - if err != nil { - return nil, errors.Wrap(err, "could not compute slashings merkleization") - } - fieldRoots[14] = slashingsRootsRoot[:] - - // PreviousEpochAttestations slice root. - prevAttsRoot, err := h.epochAttestationsRoot(state.PreviousEpochAttestations) - if err != nil { - return nil, errors.Wrap(err, "could not compute previous epoch attestations merkleization") - } - fieldRoots[15] = prevAttsRoot[:] - - // CurrentEpochAttestations slice root. - currAttsRoot, err := h.epochAttestationsRoot(state.CurrentEpochAttestations) - if err != nil { - return nil, errors.Wrap(err, "could not compute current epoch attestations merkleization") - } - fieldRoots[16] = currAttsRoot[:] - - // JustificationBits root. - justifiedBitsRoot := bytesutil.ToBytes32(state.JustificationBits) - fieldRoots[17] = justifiedBitsRoot[:] - - // PreviousJustifiedCheckpoint data structure root. 
- prevCheckRoot, err := ssz.CheckpointRoot(hasher, state.PreviousJustifiedCheckpoint) - if err != nil { - return nil, errors.Wrap(err, "could not compute previous justified checkpoint merkleization") - } - fieldRoots[18] = prevCheckRoot[:] - - // CurrentJustifiedCheckpoint data structure root. - currJustRoot, err := ssz.CheckpointRoot(hasher, state.CurrentJustifiedCheckpoint) - if err != nil { - return nil, errors.Wrap(err, "could not compute current justified checkpoint merkleization") - } - fieldRoots[19] = currJustRoot[:] - - // FinalizedCheckpoint data structure root. - finalRoot, err := ssz.CheckpointRoot(hasher, state.FinalizedCheckpoint) - if err != nil { - return nil, errors.Wrap(err, "could not compute finalized checkpoint merkleization") - } - fieldRoots[20] = finalRoot[:] - return fieldRoots, nil + return stateutil.NocachedHasher.ComputeFieldRootsWithHasherPhase0(ctx, state) } diff --git a/beacon-chain/state/v1/getters_attestation.go b/beacon-chain/state/v1/getters_attestation.go index 3a202e6c8a..e7e3bd6901 100644 --- a/beacon-chain/state/v1/getters_attestation.go +++ b/beacon-chain/state/v1/getters_attestation.go @@ -1,15 +1,6 @@ package v1 import ( - "bytes" - "encoding/binary" - "fmt" - - "github.com/pkg/errors" - "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil" - "github.com/prysmaticlabs/prysm/config/params" - "github.com/prysmaticlabs/prysm/crypto/hash" - "github.com/prysmaticlabs/prysm/encoding/ssz" ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" ) @@ -62,63 +53,3 @@ func (b *BeaconState) currentEpochAttestations() []*ethpb.PendingAttestation { return ethpb.CopyPendingAttestationSlice(b.state.CurrentEpochAttestations) } - -func (h *stateRootHasher) epochAttestationsRoot(atts []*ethpb.PendingAttestation) ([32]byte, error) { - max := uint64(params.BeaconConfig().SlotsPerEpoch) * params.BeaconConfig().MaxAttestations - if uint64(len(atts)) > max { - return [32]byte{}, fmt.Errorf("epoch attestation exceeds max length %d", 
max) - } - - hasher := hash.CustomSHA256Hasher() - roots := make([][]byte, len(atts)) - for i := 0; i < len(atts); i++ { - pendingRoot, err := h.pendingAttestationRoot(hasher, atts[i]) - if err != nil { - return [32]byte{}, errors.Wrap(err, "could not attestation merkleization") - } - roots[i] = pendingRoot[:] - } - - attsRootsRoot, err := ssz.BitwiseMerkleize( - hasher, - roots, - uint64(len(roots)), - uint64(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().MaxAttestations)), - ) - if err != nil { - return [32]byte{}, errors.Wrap(err, "could not compute epoch attestations merkleization") - } - attsLenBuf := new(bytes.Buffer) - if err := binary.Write(attsLenBuf, binary.LittleEndian, uint64(len(atts))); err != nil { - return [32]byte{}, errors.Wrap(err, "could not marshal epoch attestations length") - } - // We need to mix in the length of the slice. - attsLenRoot := make([]byte, 32) - copy(attsLenRoot, attsLenBuf.Bytes()) - res := ssz.MixInLength(attsRootsRoot, attsLenRoot) - return res, nil -} - -func (h *stateRootHasher) pendingAttestationRoot(hasher ssz.HashFn, att *ethpb.PendingAttestation) ([32]byte, error) { - if att == nil { - return [32]byte{}, errors.New("nil pending attestation") - } - // Marshal attestation to determine if it exists in the cache. 
- enc := stateutil.PendingAttEncKey(att) - - // Check if it exists in cache: - if h.rootsCache != nil { - if found, ok := h.rootsCache.Get(string(enc)); found != nil && ok { - return found.([32]byte), nil - } - } - - res, err := stateutil.PendingAttRootWithHasher(hasher, att) - if err != nil { - return [32]byte{}, err - } - if h.rootsCache != nil { - h.rootsCache.Set(string(enc), res, 32) - } - return res, nil -} diff --git a/beacon-chain/state/v1/getters_eth1.go b/beacon-chain/state/v1/getters_eth1.go index 13376c4007..4f1ad84d12 100644 --- a/beacon-chain/state/v1/getters_eth1.go +++ b/beacon-chain/state/v1/getters_eth1.go @@ -1,10 +1,6 @@ package v1 import ( - "github.com/pkg/errors" - "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil" - "github.com/prysmaticlabs/prysm/config/features" - "github.com/prysmaticlabs/prysm/encoding/ssz" ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" ) @@ -93,53 +89,3 @@ func (b *BeaconState) eth1DepositIndex() uint64 { return b.state.Eth1DepositIndex } - -// eth1Root computes the HashTreeRoot Merkleization of -// a BeaconBlockHeader struct according to the Ethereum -// Simple Serialize specification. -func eth1Root(hasher ssz.HashFn, eth1Data *ethpb.Eth1Data) ([32]byte, error) { - if eth1Data == nil { - return [32]byte{}, errors.New("nil eth1 data") - } - - enc := stateutil.Eth1DataEncKey(eth1Data) - if features.Get().EnableSSZCache { - if found, ok := cachedHasher.rootsCache.Get(string(enc)); ok && found != nil { - return found.([32]byte), nil - } - } - - root, err := stateutil.Eth1DataRootWithHasher(hasher, eth1Data) - if err != nil { - return [32]byte{}, err - } - - if features.Get().EnableSSZCache { - cachedHasher.rootsCache.Set(string(enc), root, 32) - } - return root, nil -} - -// eth1DataVotesRoot computes the HashTreeRoot Merkleization of -// a list of Eth1Data structs according to the Ethereum -// Simple Serialize specification. 
-func eth1DataVotesRoot(eth1DataVotes []*ethpb.Eth1Data) ([32]byte, error) { - hashKey, err := stateutil.Eth1DatasEncKey(eth1DataVotes) - if err != nil { - return [32]byte{}, err - } - - if features.Get().EnableSSZCache { - if found, ok := cachedHasher.rootsCache.Get(string(hashKey[:])); ok && found != nil { - return found.([32]byte), nil - } - } - root, err := stateutil.Eth1DatasRoot(eth1DataVotes) - if err != nil { - return [32]byte{}, err - } - if features.Get().EnableSSZCache { - cachedHasher.rootsCache.Set(string(hashKey[:]), root, 32) - } - return root, nil -} diff --git a/beacon-chain/state/v1/getters_misc.go b/beacon-chain/state/v1/getters_misc.go index d10b92d561..8cb95cd649 100644 --- a/beacon-chain/state/v1/getters_misc.go +++ b/beacon-chain/state/v1/getters_misc.go @@ -1,14 +1,9 @@ package v1 import ( - "github.com/pkg/errors" types "github.com/prysmaticlabs/eth2-types" - "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil" - "github.com/prysmaticlabs/prysm/config/features" "github.com/prysmaticlabs/prysm/config/params" - "github.com/prysmaticlabs/prysm/crypto/hash" "github.com/prysmaticlabs/prysm/encoding/bytesutil" - "github.com/prysmaticlabs/prysm/encoding/ssz" ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" "github.com/prysmaticlabs/prysm/runtime/version" ) @@ -166,150 +161,3 @@ func (b *BeaconState) balancesLength() int { return len(b.state.Balances) } - -// RootsArrayHashTreeRoot computes the Merkle root of arrays of 32-byte hashes, such as [64][32]byte -// according to the Simple Serialize specification of Ethereum. 
-func RootsArrayHashTreeRoot(vals [][]byte, length uint64, fieldName string) ([32]byte, error) { - if features.Get().EnableSSZCache { - return cachedHasher.arraysRoot(vals, length, fieldName) - } - return nocachedHasher.arraysRoot(vals, length, fieldName) -} - -func (h *stateRootHasher) arraysRoot(input [][]byte, length uint64, fieldName string) ([32]byte, error) { - lock.Lock() - defer lock.Unlock() - hashFunc := hash.CustomSHA256Hasher() - if _, ok := layersCache[fieldName]; !ok && h.rootsCache != nil { - depth := ssz.Depth(length) - layersCache[fieldName] = make([][][32]byte, depth+1) - } - - leaves := make([][32]byte, length) - for i, chunk := range input { - copy(leaves[i][:], chunk) - } - bytesProcessed := 0 - changedIndices := make([]int, 0) - prevLeaves, ok := leavesCache[fieldName] - if len(prevLeaves) == 0 || h.rootsCache == nil { - prevLeaves = leaves - } - - for i := 0; i < len(leaves); i++ { - // We check if any items changed since the roots were last recomputed. - notEqual := leaves[i] != prevLeaves[i] - if ok && h.rootsCache != nil && notEqual { - changedIndices = append(changedIndices, i) - } - bytesProcessed += 32 - } - if len(changedIndices) > 0 && h.rootsCache != nil { - var rt [32]byte - var err error - // If indices did change since last computation, we only recompute - // the modified branches in the cached Merkle tree for this state field. - chunks := leaves - - // We need to ensure we recompute indices of the Merkle tree which - // changed in-between calls to this function. This check adds an offset - // to the recomputed indices to ensure we do so evenly. 
- maxChangedIndex := changedIndices[len(changedIndices)-1] - if maxChangedIndex+2 == len(chunks) && maxChangedIndex%2 != 0 { - changedIndices = append(changedIndices, maxChangedIndex+1) - } - for i := 0; i < len(changedIndices); i++ { - rt, err = recomputeRoot(changedIndices[i], chunks, fieldName, hashFunc) - if err != nil { - return [32]byte{}, err - } - } - leavesCache[fieldName] = chunks - return rt, nil - } - - res, err := h.merkleizeWithCache(leaves, length, fieldName, hashFunc) - if err != nil { - return [32]byte{}, err - } - if h.rootsCache != nil { - leavesCache[fieldName] = leaves - } - return res, nil -} - -func recomputeRoot(idx int, chunks [][32]byte, fieldName string, hasher func([]byte) [32]byte) ([32]byte, error) { - items, ok := layersCache[fieldName] - if !ok { - return [32]byte{}, errors.New("could not recompute root as there was no cache found") - } - if items == nil { - return [32]byte{}, errors.New("could not recompute root as there were no items found in the layers cache") - } - layers := items - root := chunks[idx] - layers[0] = chunks - // The merkle tree structure looks as follows: - // [[r1, r2, r3, r4], [parent1, parent2], [root]] - // Using information about the index which changed, idx, we recompute - // only its branch up the tree. - currentIndex := idx - for i := 0; i < len(layers)-1; i++ { - isLeft := currentIndex%2 == 0 - neighborIdx := currentIndex ^ 1 - - neighbor := [32]byte{} - if layers[i] != nil && len(layers[i]) != 0 && neighborIdx < len(layers[i]) { - neighbor = layers[i][neighborIdx] - } - if isLeft { - parentHash := hasher(append(root[:], neighbor[:]...)) - root = parentHash - } else { - parentHash := hasher(append(neighbor[:], root[:]...)) - root = parentHash - } - parentIdx := currentIndex / 2 - // Update the cached layers at the parent index. 
- if len(layers[i+1]) == 0 { - layers[i+1] = append(layers[i+1], root) - } else { - layers[i+1][parentIdx] = root - } - currentIndex = parentIdx - } - layersCache[fieldName] = layers - // If there is only a single leaf, we return it (the identity element). - if len(layers[0]) == 1 { - return layers[0][0], nil - } - return root, nil -} - -func (h *stateRootHasher) merkleizeWithCache(leaves [][32]byte, length uint64, - fieldName string, hasher func([]byte) [32]byte) ([32]byte, error) { - if len(leaves) == 0 { - return [32]byte{}, errors.New("zero leaves provided") - } - if len(leaves) == 1 { - return leaves[0], nil - } - hashLayer := leaves - layers := make([][][32]byte, ssz.Depth(length)+1) - if items, ok := layersCache[fieldName]; ok && h.rootsCache != nil { - if len(items[0]) == len(leaves) { - layers = items - } - } - layers[0] = hashLayer - var err error - layers, hashLayer, err = stateutil.MerkleizeTrieLeaves(layers, hashLayer, hasher) - if err != nil { - return [32]byte{}, err - } - root := hashLayer[0] - if h.rootsCache != nil { - layersCache[fieldName] = layers - } - return root, nil -} diff --git a/beacon-chain/state/v1/getters_validator.go b/beacon-chain/state/v1/getters_validator.go index 2de0d79834..3123ec27ce 100644 --- a/beacon-chain/state/v1/getters_validator.go +++ b/beacon-chain/state/v1/getters_validator.go @@ -1,19 +1,12 @@ package v1 import ( - "bytes" - "encoding/binary" "fmt" "github.com/pkg/errors" types "github.com/prysmaticlabs/eth2-types" "github.com/prysmaticlabs/prysm/beacon-chain/state" - "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil" - "github.com/prysmaticlabs/prysm/config/features" - "github.com/prysmaticlabs/prysm/config/params" - "github.com/prysmaticlabs/prysm/crypto/hash" "github.com/prysmaticlabs/prysm/encoding/bytesutil" - "github.com/prysmaticlabs/prysm/encoding/ssz" ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" ) @@ -302,78 +295,3 @@ func (b *BeaconState) slashings() []uint64 { copy(res, 
b.state.Slashings) return res } - -func (h *stateRootHasher) validatorRegistryRoot(validators []*ethpb.Validator) ([32]byte, error) { - hashKeyElements := make([]byte, len(validators)*32) - roots := make([][32]byte, len(validators)) - emptyKey := hash.FastSum256(hashKeyElements) - hasher := hash.CustomSHA256Hasher() - bytesProcessed := 0 - for i := 0; i < len(validators); i++ { - val, err := h.validatorRoot(hasher, validators[i]) - if err != nil { - return [32]byte{}, errors.Wrap(err, "could not compute validators merkleization") - } - copy(hashKeyElements[bytesProcessed:bytesProcessed+32], val[:]) - roots[i] = val - bytesProcessed += 32 - } - - hashKey := hash.FastSum256(hashKeyElements) - if hashKey != emptyKey && h.rootsCache != nil { - if found, ok := h.rootsCache.Get(string(hashKey[:])); found != nil && ok { - return found.([32]byte), nil - } - } - - validatorsRootsRoot, err := ssz.BitwiseMerkleizeArrays(hasher, roots, uint64(len(roots)), params.BeaconConfig().ValidatorRegistryLimit) - if err != nil { - return [32]byte{}, errors.Wrap(err, "could not compute validator registry merkleization") - } - validatorsRootsBuf := new(bytes.Buffer) - if err := binary.Write(validatorsRootsBuf, binary.LittleEndian, uint64(len(validators))); err != nil { - return [32]byte{}, errors.Wrap(err, "could not marshal validator registry length") - } - // We need to mix in the length of the slice. 
- var validatorsRootsBufRoot [32]byte - copy(validatorsRootsBufRoot[:], validatorsRootsBuf.Bytes()) - res := ssz.MixInLength(validatorsRootsRoot, validatorsRootsBufRoot[:]) - if hashKey != emptyKey && h.rootsCache != nil { - h.rootsCache.Set(string(hashKey[:]), res, 32) - } - return res, nil -} - -func (h *stateRootHasher) validatorRoot(hasher ssz.HashFn, validator *ethpb.Validator) ([32]byte, error) { - if validator == nil { - return [32]byte{}, errors.New("nil validator") - } - - enc := stateutil.ValidatorEncKey(validator) - // Check if it exists in cache: - if h.rootsCache != nil { - if found, ok := h.rootsCache.Get(string(enc)); found != nil && ok { - return found.([32]byte), nil - } - } - - valRoot, err := stateutil.ValidatorRootWithHasher(hasher, validator) - if err != nil { - return [32]byte{}, err - } - - if h.rootsCache != nil { - h.rootsCache.Set(string(enc), valRoot, 32) - } - return valRoot, nil -} - -// ValidatorRegistryRoot computes the HashTreeRoot Merkleization of -// a list of validator structs according to the Ethereum -// Simple Serialize specification. 
-func ValidatorRegistryRoot(vals []*ethpb.Validator) ([32]byte, error) { - if features.Get().EnableSSZCache { - return cachedHasher.validatorRegistryRoot(vals) - } - return nocachedHasher.validatorRegistryRoot(vals) -} diff --git a/beacon-chain/state/v1/getters_validator_test.go b/beacon-chain/state/v1/getters_validator_test.go index 01af50c2ed..cb2834976a 100644 --- a/beacon-chain/state/v1/getters_validator_test.go +++ b/beacon-chain/state/v1/getters_validator_test.go @@ -18,19 +18,3 @@ func TestBeaconState_ValidatorAtIndexReadOnly_HandlesNilSlice(t *testing.T) { _, err = st.ValidatorAtIndexReadOnly(0) assert.Equal(t, v1.ErrNilValidatorsInState, err) } - -func TestArraysTreeRoot_OnlyPowerOf2(t *testing.T) { - _, err := v1.RootsArrayHashTreeRoot([][]byte{}, 1, "testing") - assert.NoError(t, err) - _, err = v1.RootsArrayHashTreeRoot([][]byte{}, 4, "testing") - assert.NoError(t, err) - _, err = v1.RootsArrayHashTreeRoot([][]byte{}, 8, "testing") - assert.NoError(t, err) - _, err = v1.RootsArrayHashTreeRoot([][]byte{}, 10, "testing") - assert.ErrorContains(t, "hash layer is a non power of 2", err) -} - -func TestArraysTreeRoot_ZeroLength(t *testing.T) { - _, err := v1.RootsArrayHashTreeRoot([][]byte{}, 0, "testing") - assert.ErrorContains(t, "zero leaves provided", err) -} diff --git a/beacon-chain/state/v1/state_trie.go b/beacon-chain/state/v1/state_trie.go index 2c862ca034..a69ce22181 100644 --- a/beacon-chain/state/v1/state_trie.go +++ b/beacon-chain/state/v1/state_trie.go @@ -294,7 +294,7 @@ func (b *BeaconState) rootSelector(ctx context.Context, field types.FieldIndex) case historicalRoots: return ssz.ByteArrayRootWithLimit(b.state.HistoricalRoots, params.BeaconConfig().HistoricalRootsLimit) case eth1Data: - return eth1Root(hasher, b.state.Eth1Data) + return stateutil.Eth1Root(hasher, b.state.Eth1Data) case eth1DataVotes: if b.rebuildTrie[field] { err := b.resetFieldTrie( diff --git a/beacon-chain/state/v2/BUILD.bazel b/beacon-chain/state/v2/BUILD.bazel index 
063fbef75b..797ef4ecf4 100644 --- a/beacon-chain/state/v2/BUILD.bazel +++ b/beacon-chain/state/v2/BUILD.bazel @@ -5,9 +5,6 @@ go_library( srcs = [ "deprecated_getters.go", "deprecated_setters.go", - "field_root_eth1.go", - "field_root_validator.go", - "field_root_vector.go", "field_roots.go", "getters_block.go", "getters_checkpoint.go", @@ -51,7 +48,6 @@ go_library( "//encoding/ssz:go_default_library", "//proto/prysm/v1alpha1:go_default_library", "//runtime/version:go_default_library", - "@com_github_dgraph_io_ristretto//:go_default_library", "@com_github_pkg_errors//:go_default_library", "@com_github_prometheus_client_golang//prometheus:go_default_library", "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library", @@ -67,7 +63,6 @@ go_test( srcs = [ "deprecated_getters_test.go", "deprecated_setters_test.go", - "field_root_test.go", "getters_block_test.go", "getters_test.go", "getters_validator_test.go", diff --git a/beacon-chain/state/v2/field_root_test.go b/beacon-chain/state/v2/field_root_test.go deleted file mode 100644 index 5fb530df3d..0000000000 --- a/beacon-chain/state/v2/field_root_test.go +++ /dev/null @@ -1,23 +0,0 @@ -package v2 - -import ( - "testing" - - "github.com/prysmaticlabs/prysm/testing/assert" -) - -func TestArraysTreeRoot_OnlyPowerOf2(t *testing.T) { - _, err := nocachedHasher.arraysRoot([][]byte{}, 1, "testing") - assert.NoError(t, err) - _, err = nocachedHasher.arraysRoot([][]byte{}, 4, "testing") - assert.NoError(t, err) - _, err = nocachedHasher.arraysRoot([][]byte{}, 8, "testing") - assert.NoError(t, err) - _, err = nocachedHasher.arraysRoot([][]byte{}, 10, "testing") - assert.ErrorContains(t, "hash layer is a non power of 2", err) -} - -func TestArraysTreeRoot_ZeroLength(t *testing.T) { - _, err := nocachedHasher.arraysRoot([][]byte{}, 0, "testing") - assert.ErrorContains(t, "zero leaves provided", err) -} diff --git a/beacon-chain/state/v2/field_root_validator.go b/beacon-chain/state/v2/field_root_validator.go 
deleted file mode 100644 index 1f502d1fda..0000000000 --- a/beacon-chain/state/v2/field_root_validator.go +++ /dev/null @@ -1,78 +0,0 @@ -package v2 - -import ( - "bytes" - "encoding/binary" - - "github.com/pkg/errors" - "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil" - "github.com/prysmaticlabs/prysm/config/params" - "github.com/prysmaticlabs/prysm/crypto/hash" - "github.com/prysmaticlabs/prysm/encoding/ssz" - ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" -) - -func (h *stateRootHasher) validatorRegistryRoot(validators []*ethpb.Validator) ([32]byte, error) { - hashKeyElements := make([]byte, len(validators)*32) - roots := make([][32]byte, len(validators)) - emptyKey := hash.FastSum256(hashKeyElements) - hasher := hash.CustomSHA256Hasher() - bytesProcessed := 0 - for i := 0; i < len(validators); i++ { - val, err := h.validatorRoot(hasher, validators[i]) - if err != nil { - return [32]byte{}, errors.Wrap(err, "could not compute validators merkleization") - } - copy(hashKeyElements[bytesProcessed:bytesProcessed+32], val[:]) - roots[i] = val - bytesProcessed += 32 - } - - hashKey := hash.FastSum256(hashKeyElements) - if hashKey != emptyKey && h.rootsCache != nil { - if found, ok := h.rootsCache.Get(string(hashKey[:])); found != nil && ok { - return found.([32]byte), nil - } - } - - validatorsRootsRoot, err := ssz.BitwiseMerkleizeArrays(hasher, roots, uint64(len(roots)), params.BeaconConfig().ValidatorRegistryLimit) - if err != nil { - return [32]byte{}, errors.Wrap(err, "could not compute validator registry merkleization") - } - validatorsRootsBuf := new(bytes.Buffer) - if err := binary.Write(validatorsRootsBuf, binary.LittleEndian, uint64(len(validators))); err != nil { - return [32]byte{}, errors.Wrap(err, "could not marshal validator registry length") - } - // We need to mix in the length of the slice. 
- var validatorsRootsBufRoot [32]byte - copy(validatorsRootsBufRoot[:], validatorsRootsBuf.Bytes()) - res := ssz.MixInLength(validatorsRootsRoot, validatorsRootsBufRoot[:]) - if hashKey != emptyKey && h.rootsCache != nil { - h.rootsCache.Set(string(hashKey[:]), res, 32) - } - return res, nil -} - -func (h *stateRootHasher) validatorRoot(hasher ssz.HashFn, validator *ethpb.Validator) ([32]byte, error) { - if validator == nil { - return [32]byte{}, errors.New("nil validator") - } - - enc := stateutil.ValidatorEncKey(validator) - // Check if it exists in cache: - if h.rootsCache != nil { - if found, ok := h.rootsCache.Get(string(enc)); found != nil && ok { - return found.([32]byte), nil - } - } - - valRoot, err := stateutil.ValidatorRootWithHasher(hasher, validator) - if err != nil { - return [32]byte{}, err - } - - if h.rootsCache != nil { - h.rootsCache.Set(string(enc), valRoot, 32) - } - return valRoot, nil -} diff --git a/beacon-chain/state/v2/field_root_vector.go b/beacon-chain/state/v2/field_root_vector.go deleted file mode 100644 index 64b1d966d5..0000000000 --- a/beacon-chain/state/v2/field_root_vector.go +++ /dev/null @@ -1,146 +0,0 @@ -package v2 - -import ( - "github.com/pkg/errors" - "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil" - "github.com/prysmaticlabs/prysm/crypto/hash" - "github.com/prysmaticlabs/prysm/encoding/ssz" -) - -func (h *stateRootHasher) arraysRoot(input [][]byte, length uint64, fieldName string) ([32]byte, error) { - lock.Lock() - defer lock.Unlock() - hashFunc := hash.CustomSHA256Hasher() - if _, ok := layersCache[fieldName]; !ok && h.rootsCache != nil { - depth := ssz.Depth(length) - layersCache[fieldName] = make([][][32]byte, depth+1) - } - - leaves := make([][32]byte, length) - for i, chunk := range input { - copy(leaves[i][:], chunk) - } - bytesProcessed := 0 - changedIndices := make([]int, 0) - prevLeaves, ok := leavesCache[fieldName] - if len(prevLeaves) == 0 || h.rootsCache == nil { - prevLeaves = leaves - } - - for 
i := 0; i < len(leaves); i++ { - // We check if any items changed since the roots were last recomputed. - notEqual := leaves[i] != prevLeaves[i] - if ok && h.rootsCache != nil && notEqual { - changedIndices = append(changedIndices, i) - } - bytesProcessed += 32 - } - if len(changedIndices) > 0 && h.rootsCache != nil { - var rt [32]byte - var err error - // If indices did change since last computation, we only recompute - // the modified branches in the cached Merkle tree for this state field. - chunks := leaves - - // We need to ensure we recompute indices of the Merkle tree which - // changed in-between calls to this function. This check adds an offset - // to the recomputed indices to ensure we do so evenly. - maxChangedIndex := changedIndices[len(changedIndices)-1] - if maxChangedIndex+2 == len(chunks) && maxChangedIndex%2 != 0 { - changedIndices = append(changedIndices, maxChangedIndex+1) - } - for i := 0; i < len(changedIndices); i++ { - rt, err = recomputeRoot(changedIndices[i], chunks, fieldName, hashFunc) - if err != nil { - return [32]byte{}, err - } - } - leavesCache[fieldName] = chunks - return rt, nil - } - - res, err := h.merkleizeWithCache(leaves, length, fieldName, hashFunc) - if err != nil { - return [32]byte{}, err - } - if h.rootsCache != nil { - leavesCache[fieldName] = leaves - } - return res, nil -} - -func recomputeRoot(idx int, chunks [][32]byte, fieldName string, hasher func([]byte) [32]byte) ([32]byte, error) { - items, ok := layersCache[fieldName] - if !ok { - return [32]byte{}, errors.New("could not recompute root as there was no cache found") - } - if items == nil { - return [32]byte{}, errors.New("could not recompute root as there were no items found in the layers cache") - } - layers := items - root := chunks[idx] - layers[0] = chunks - // The merkle tree structure looks as follows: - // [[r1, r2, r3, r4], [parent1, parent2], [root]] - // Using information about the index which changed, idx, we recompute - // only its branch up the 
tree. - currentIndex := idx - for i := 0; i < len(layers)-1; i++ { - isLeft := currentIndex%2 == 0 - neighborIdx := currentIndex ^ 1 - - neighbor := [32]byte{} - if layers[i] != nil && len(layers[i]) != 0 && neighborIdx < len(layers[i]) { - neighbor = layers[i][neighborIdx] - } - if isLeft { - parentHash := hasher(append(root[:], neighbor[:]...)) - root = parentHash - } else { - parentHash := hasher(append(neighbor[:], root[:]...)) - root = parentHash - } - parentIdx := currentIndex / 2 - // Update the cached layers at the parent index. - if len(layers[i+1]) == 0 { - layers[i+1] = append(layers[i+1], root) - } else { - layers[i+1][parentIdx] = root - } - currentIndex = parentIdx - } - layersCache[fieldName] = layers - // If there is only a single leaf, we return it (the identity element). - if len(layers[0]) == 1 { - return layers[0][0], nil - } - return root, nil -} - -func (h *stateRootHasher) merkleizeWithCache(leaves [][32]byte, length uint64, - fieldName string, hasher func([]byte) [32]byte) ([32]byte, error) { - if len(leaves) == 0 { - return [32]byte{}, errors.New("zero leaves provided") - } - if len(leaves) == 1 { - return leaves[0], nil - } - hashLayer := leaves - layers := make([][][32]byte, ssz.Depth(length)+1) - if items, ok := layersCache[fieldName]; ok && h.rootsCache != nil { - if len(items[0]) == len(leaves) { - layers = items - } - } - layers[0] = hashLayer - var err error - layers, hashLayer, err = stateutil.MerkleizeTrieLeaves(layers, hashLayer, hasher) - if err != nil { - return [32]byte{}, err - } - root := hashLayer[0] - if h.rootsCache != nil { - layersCache[fieldName] = layers - } - return root, nil -} diff --git a/beacon-chain/state/v2/field_roots.go b/beacon-chain/state/v2/field_roots.go index 51217c5594..1b5a438bc1 100644 --- a/beacon-chain/state/v2/field_roots.go +++ b/beacon-chain/state/v2/field_roots.go @@ -2,225 +2,17 @@ package v2 import ( "context" - "encoding/binary" - "sync" - "github.com/dgraph-io/ristretto" - 
"github.com/pkg/errors" "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil" "github.com/prysmaticlabs/prysm/config/features" - "github.com/prysmaticlabs/prysm/config/params" - "github.com/prysmaticlabs/prysm/crypto/hash" - "github.com/prysmaticlabs/prysm/encoding/bytesutil" - "github.com/prysmaticlabs/prysm/encoding/ssz" ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" - "go.opencensus.io/trace" ) -var ( - leavesCache = make(map[string][][32]byte, params.BeaconConfig().BeaconStateAltairFieldCount) - layersCache = make(map[string][][][32]byte, params.BeaconConfig().BeaconStateAltairFieldCount) - lock sync.RWMutex -) - -const cacheSize = 100000 - -var nocachedHasher *stateRootHasher -var cachedHasher *stateRootHasher - -func init() { - rootsCache, err := ristretto.NewCache(&ristretto.Config{ - NumCounters: cacheSize, // number of keys to track frequency of (1M). - MaxCost: 1 << 22, // maximum cost of cache (3MB). - // 100,000 roots will take up approximately 3 MB in memory. - BufferItems: 64, // number of keys per Get buffer. - }) - if err != nil { - panic(err) - } - // Temporarily disable roots cache until cache issues can be resolved. - cachedHasher = &stateRootHasher{rootsCache: rootsCache} - nocachedHasher = &stateRootHasher{} -} - -type stateRootHasher struct { - rootsCache *ristretto.Cache -} - // computeFieldRoots returns the hash tree root computations of every field in // the beacon state as a list of 32 byte roots. 
func computeFieldRoots(ctx context.Context, state *ethpb.BeaconStateAltair) ([][]byte, error) { if features.Get().EnableSSZCache { - return cachedHasher.computeFieldRootsWithHasher(ctx, state) + return stateutil.CachedHasher.ComputeFieldRootsWithHasherAltair(ctx, state) } - return nocachedHasher.computeFieldRootsWithHasher(ctx, state) -} - -func (h *stateRootHasher) computeFieldRootsWithHasher(ctx context.Context, state *ethpb.BeaconStateAltair) ([][]byte, error) { - ctx, span := trace.StartSpan(ctx, "beaconState.computeFieldRootsWithHasher") - defer span.End() - - if state == nil { - return nil, errors.New("nil state") - } - hasher := hash.CustomSHA256Hasher() - fieldRoots := make([][]byte, params.BeaconConfig().BeaconStateAltairFieldCount) - - // Genesis time root. - genesisRoot := ssz.Uint64Root(state.GenesisTime) - fieldRoots[0] = genesisRoot[:] - - // Genesis validator root. - r := [32]byte{} - copy(r[:], state.GenesisValidatorsRoot) - fieldRoots[1] = r[:] - - // Slot root. - slotRoot := ssz.Uint64Root(uint64(state.Slot)) - fieldRoots[2] = slotRoot[:] - - // Fork data structure root. - forkHashTreeRoot, err := ssz.ForkRoot(state.Fork) - if err != nil { - return nil, errors.Wrap(err, "could not compute fork merkleization") - } - fieldRoots[3] = forkHashTreeRoot[:] - - // BeaconBlockHeader data structure root. - headerHashTreeRoot, err := stateutil.BlockHeaderRoot(state.LatestBlockHeader) - if err != nil { - return nil, errors.Wrap(err, "could not compute block header merkleization") - } - fieldRoots[4] = headerHashTreeRoot[:] - - // BlockRoots array root. - blockRootsRoot, err := h.arraysRoot(state.BlockRoots, uint64(params.BeaconConfig().SlotsPerHistoricalRoot), "BlockRoots") - if err != nil { - return nil, errors.Wrap(err, "could not compute block roots merkleization") - } - fieldRoots[5] = blockRootsRoot[:] - - // StateRoots array root. 
- stateRootsRoot, err := h.arraysRoot(state.StateRoots, uint64(params.BeaconConfig().SlotsPerHistoricalRoot), "StateRoots") - if err != nil { - return nil, errors.Wrap(err, "could not compute state roots merkleization") - } - fieldRoots[6] = stateRootsRoot[:] - - // HistoricalRoots slice root. - historicalRootsRt, err := ssz.ByteArrayRootWithLimit(state.HistoricalRoots, params.BeaconConfig().HistoricalRootsLimit) - if err != nil { - return nil, errors.Wrap(err, "could not compute historical roots merkleization") - } - fieldRoots[7] = historicalRootsRt[:] - - // Eth1Data data structure root. - eth1HashTreeRoot, err := eth1Root(hasher, state.Eth1Data) - if err != nil { - return nil, errors.Wrap(err, "could not compute eth1data merkleization") - } - fieldRoots[8] = eth1HashTreeRoot[:] - - // Eth1DataVotes slice root. - eth1VotesRoot, err := eth1DataVotesRoot(state.Eth1DataVotes) - if err != nil { - return nil, errors.Wrap(err, "could not compute eth1data votes merkleization") - } - fieldRoots[9] = eth1VotesRoot[:] - - // Eth1DepositIndex root. - eth1DepositIndexBuf := make([]byte, 8) - binary.LittleEndian.PutUint64(eth1DepositIndexBuf, state.Eth1DepositIndex) - eth1DepositBuf := bytesutil.ToBytes32(eth1DepositIndexBuf) - fieldRoots[10] = eth1DepositBuf[:] - - // Validators slice root. - validatorsRoot, err := h.validatorRegistryRoot(state.Validators) - if err != nil { - return nil, errors.Wrap(err, "could not compute validator registry merkleization") - } - fieldRoots[11] = validatorsRoot[:] - - // Balances slice root. - balancesRoot, err := stateutil.Uint64ListRootWithRegistryLimit(state.Balances) - if err != nil { - return nil, errors.Wrap(err, "could not compute validator balances merkleization") - } - fieldRoots[12] = balancesRoot[:] - - // RandaoMixes array root. 
- randaoRootsRoot, err := h.arraysRoot(state.RandaoMixes, uint64(params.BeaconConfig().EpochsPerHistoricalVector), "RandaoMixes") - if err != nil { - return nil, errors.Wrap(err, "could not compute randao roots merkleization") - } - fieldRoots[13] = randaoRootsRoot[:] - - // Slashings array root. - slashingsRootsRoot, err := ssz.SlashingsRoot(state.Slashings) - if err != nil { - return nil, errors.Wrap(err, "could not compute slashings merkleization") - } - fieldRoots[14] = slashingsRootsRoot[:] - - // PreviousEpochParticipation slice root. - prevParticipationRoot, err := stateutil.ParticipationBitsRoot(state.PreviousEpochParticipation) - if err != nil { - return nil, errors.Wrap(err, "could not compute previous epoch participation merkleization") - } - fieldRoots[15] = prevParticipationRoot[:] - - // CurrentEpochParticipation slice root. - currParticipationRoot, err := stateutil.ParticipationBitsRoot(state.CurrentEpochParticipation) - if err != nil { - return nil, errors.Wrap(err, "could not compute current epoch participation merkleization") - } - fieldRoots[16] = currParticipationRoot[:] - - // JustificationBits root. - justifiedBitsRoot := bytesutil.ToBytes32(state.JustificationBits) - fieldRoots[17] = justifiedBitsRoot[:] - - // PreviousJustifiedCheckpoint data structure root. - prevCheckRoot, err := ssz.CheckpointRoot(hasher, state.PreviousJustifiedCheckpoint) - if err != nil { - return nil, errors.Wrap(err, "could not compute previous justified checkpoint merkleization") - } - fieldRoots[18] = prevCheckRoot[:] - - // CurrentJustifiedCheckpoint data structure root. - currJustRoot, err := ssz.CheckpointRoot(hasher, state.CurrentJustifiedCheckpoint) - if err != nil { - return nil, errors.Wrap(err, "could not compute current justified checkpoint merkleization") - } - fieldRoots[19] = currJustRoot[:] - - // FinalizedCheckpoint data structure root. 
- finalRoot, err := ssz.CheckpointRoot(hasher, state.FinalizedCheckpoint) - if err != nil { - return nil, errors.Wrap(err, "could not compute finalized checkpoint merkleization") - } - fieldRoots[20] = finalRoot[:] - - // Inactivity scores root. - inactivityScoresRoot, err := stateutil.Uint64ListRootWithRegistryLimit(state.InactivityScores) - if err != nil { - return nil, errors.Wrap(err, "could not compute inactivityScoreRoot") - } - fieldRoots[21] = inactivityScoresRoot[:] - - // Current sync committee root. - currentSyncCommitteeRoot, err := stateutil.SyncCommitteeRoot(state.CurrentSyncCommittee) - if err != nil { - return nil, errors.Wrap(err, "could not compute sync committee merkleization") - } - fieldRoots[22] = currentSyncCommitteeRoot[:] - - // Next sync committee root. - nextSyncCommitteeRoot, err := stateutil.SyncCommitteeRoot(state.NextSyncCommittee) - if err != nil { - return nil, errors.Wrap(err, "could not compute sync committee merkleization") - } - fieldRoots[23] = nextSyncCommitteeRoot[:] - - return fieldRoots, nil + return stateutil.NocachedHasher.ComputeFieldRootsWithHasherAltair(ctx, state) } diff --git a/beacon-chain/state/v2/state_trie.go b/beacon-chain/state/v2/state_trie.go index 8015605ab0..6bd873259d 100644 --- a/beacon-chain/state/v2/state_trie.go +++ b/beacon-chain/state/v2/state_trie.go @@ -299,7 +299,7 @@ func (b *BeaconState) rootSelector(ctx context.Context, field types.FieldIndex) case historicalRoots: return ssz.ByteArrayRootWithLimit(b.state.HistoricalRoots, params.BeaconConfig().HistoricalRootsLimit) case eth1Data: - return eth1Root(hasher, b.state.Eth1Data) + return stateutil.Eth1Root(hasher, b.state.Eth1Data) case eth1DataVotes: if b.rebuildTrie[field] { err := b.resetFieldTrie( diff --git a/beacon-chain/state/v3/BUILD.bazel b/beacon-chain/state/v3/BUILD.bazel index 1f7735d4eb..3f602eacd6 100644 --- a/beacon-chain/state/v3/BUILD.bazel +++ b/beacon-chain/state/v3/BUILD.bazel @@ -5,9 +5,6 @@ go_library( srcs = [ 
"deprecated_getters.go", "deprecated_setters.go", - "field_root_eth1.go", - "field_root_validator.go", - "field_root_vector.go", "field_roots.go", "getters_block.go", "getters_checkpoint.go", @@ -51,7 +48,6 @@ go_library( "//encoding/ssz:go_default_library", "//proto/prysm/v1alpha1:go_default_library", "//runtime/version:go_default_library", - "@com_github_dgraph_io_ristretto//:go_default_library", "@com_github_pkg_errors//:go_default_library", "@com_github_prometheus_client_golang//prometheus:go_default_library", "@com_github_prometheus_client_golang//prometheus/promauto:go_default_library", @@ -67,7 +63,6 @@ go_test( srcs = [ "deprecated_getters_test.go", "deprecated_setters_test.go", - "field_root_test.go", "getters_block_test.go", "getters_test.go", "getters_validator_test.go", diff --git a/beacon-chain/state/v3/field_root_eth1.go b/beacon-chain/state/v3/field_root_eth1.go deleted file mode 100644 index a02804484e..0000000000 --- a/beacon-chain/state/v3/field_root_eth1.go +++ /dev/null @@ -1,59 +0,0 @@ -package v3 - -import ( - "github.com/pkg/errors" - "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil" - "github.com/prysmaticlabs/prysm/config/features" - "github.com/prysmaticlabs/prysm/encoding/ssz" - ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" -) - -// eth1Root computes the HashTreeRoot Merkleization of -// a BeaconBlockHeader struct according to the eth2 -// Simple Serialize specification. 
-func eth1Root(hasher ssz.HashFn, eth1Data *ethpb.Eth1Data) ([32]byte, error) { - if eth1Data == nil { - return [32]byte{}, errors.New("nil eth1 data") - } - - enc := stateutil.Eth1DataEncKey(eth1Data) - if features.Get().EnableSSZCache { - if found, ok := cachedHasher.rootsCache.Get(string(enc)); ok && found != nil { - return found.([32]byte), nil - } - } - - root, err := stateutil.Eth1DataRootWithHasher(hasher, eth1Data) - if err != nil { - return [32]byte{}, err - } - - if features.Get().EnableSSZCache { - cachedHasher.rootsCache.Set(string(enc), root, 32) - } - return root, nil -} - -// eth1DataVotesRoot computes the HashTreeRoot Merkleization of -// a list of Eth1Data structs according to the eth2 -// Simple Serialize specification. -func eth1DataVotesRoot(eth1DataVotes []*ethpb.Eth1Data) ([32]byte, error) { - hashKey, err := stateutil.Eth1DatasEncKey(eth1DataVotes) - if err != nil { - return [32]byte{}, err - } - - if features.Get().EnableSSZCache { - if found, ok := cachedHasher.rootsCache.Get(string(hashKey[:])); ok && found != nil { - return found.([32]byte), nil - } - } - root, err := stateutil.Eth1DatasRoot(eth1DataVotes) - if err != nil { - return [32]byte{}, err - } - if features.Get().EnableSSZCache { - cachedHasher.rootsCache.Set(string(hashKey[:]), root, 32) - } - return root, nil -} diff --git a/beacon-chain/state/v3/field_roots.go b/beacon-chain/state/v3/field_roots.go index 5accf3ba3d..c127065c13 100644 --- a/beacon-chain/state/v3/field_roots.go +++ b/beacon-chain/state/v3/field_roots.go @@ -2,232 +2,18 @@ package v3 import ( "context" - "encoding/binary" - "sync" - "github.com/dgraph-io/ristretto" - "github.com/pkg/errors" "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil" "github.com/prysmaticlabs/prysm/config/features" - "github.com/prysmaticlabs/prysm/config/params" - "github.com/prysmaticlabs/prysm/crypto/hash" - "github.com/prysmaticlabs/prysm/encoding/bytesutil" - "github.com/prysmaticlabs/prysm/encoding/ssz" ethpb 
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" - "go.opencensus.io/trace" ) -var ( - leavesCache = make(map[string][][32]byte, params.BeaconConfig().BeaconStateMergeFieldCount) - layersCache = make(map[string][][][32]byte, params.BeaconConfig().BeaconStateMergeFieldCount) - lock sync.RWMutex -) - -const cacheSize = 100000 - -var nocachedHasher *stateRootHasher -var cachedHasher *stateRootHasher - -func init() { - rootsCache, err := ristretto.NewCache(&ristretto.Config{ - NumCounters: cacheSize, // number of keys to track frequency of (1M). - MaxCost: 1 << 22, // maximum cost of cache (3MB). - // 100,000 roots will take up approximately 3 MB in memory. - BufferItems: 64, // number of keys per Get buffer. - }) - if err != nil { - panic(err) - } - // Temporarily disable roots cache until cache issues can be resolved. - cachedHasher = &stateRootHasher{rootsCache: rootsCache} - nocachedHasher = &stateRootHasher{} -} - -type stateRootHasher struct { - rootsCache *ristretto.Cache -} - // computeFieldRoots returns the hash tree root computations of every field in // the beacon state as a list of 32 byte roots. +//nolint:deadcode func computeFieldRoots(ctx context.Context, state *ethpb.BeaconStateMerge) ([][]byte, error) { if features.Get().EnableSSZCache { - return cachedHasher.computeFieldRootsWithHasher(ctx, state) + return stateutil.CachedHasher.ComputeFieldRootsWithHasherMerge(ctx, state) } - return nocachedHasher.computeFieldRootsWithHasher(ctx, state) -} - -func (h *stateRootHasher) computeFieldRootsWithHasher(ctx context.Context, state *ethpb.BeaconStateMerge) ([][]byte, error) { - ctx, span := trace.StartSpan(ctx, "beaconState.computeFieldRootsWithHasher") - defer span.End() - - if state == nil { - return nil, errors.New("nil state") - } - hasher := hash.CustomSHA256Hasher() - fieldRoots := make([][]byte, params.BeaconConfig().BeaconStateMergeFieldCount) - - // Genesis time root. 
- genesisRoot := ssz.Uint64Root(state.GenesisTime) - fieldRoots[0] = genesisRoot[:] - - // Genesis validator root. - r := [32]byte{} - copy(r[:], state.GenesisValidatorsRoot) - fieldRoots[1] = r[:] - - // Slot root. - slotRoot := ssz.Uint64Root(uint64(state.Slot)) - fieldRoots[2] = slotRoot[:] - - // Fork data structure root. - forkHashTreeRoot, err := ssz.ForkRoot(state.Fork) - if err != nil { - return nil, errors.Wrap(err, "could not compute fork merkleization") - } - fieldRoots[3] = forkHashTreeRoot[:] - - // BeaconBlockHeader data structure root. - headerHashTreeRoot, err := stateutil.BlockHeaderRoot(state.LatestBlockHeader) - if err != nil { - return nil, errors.Wrap(err, "could not compute block header merkleization") - } - fieldRoots[4] = headerHashTreeRoot[:] - - // BlockRoots array root. - blockRootsRoot, err := h.arraysRoot(state.BlockRoots, uint64(params.BeaconConfig().SlotsPerHistoricalRoot), "BlockRoots") - if err != nil { - return nil, errors.Wrap(err, "could not compute block roots merkleization") - } - fieldRoots[5] = blockRootsRoot[:] - - // StateRoots array root. - stateRootsRoot, err := h.arraysRoot(state.StateRoots, uint64(params.BeaconConfig().SlotsPerHistoricalRoot), "StateRoots") - if err != nil { - return nil, errors.Wrap(err, "could not compute state roots merkleization") - } - fieldRoots[6] = stateRootsRoot[:] - - // HistoricalRoots slice root. - historicalRootsRt, err := ssz.ByteArrayRootWithLimit(state.HistoricalRoots, params.BeaconConfig().HistoricalRootsLimit) - if err != nil { - return nil, errors.Wrap(err, "could not compute historical roots merkleization") - } - fieldRoots[7] = historicalRootsRt[:] - - // Eth1Data data structure root. - eth1HashTreeRoot, err := eth1Root(hasher, state.Eth1Data) - if err != nil { - return nil, errors.Wrap(err, "could not compute eth1data merkleization") - } - fieldRoots[8] = eth1HashTreeRoot[:] - - // Eth1DataVotes slice root. 
- eth1VotesRoot, err := eth1DataVotesRoot(state.Eth1DataVotes) - if err != nil { - return nil, errors.Wrap(err, "could not compute eth1data votes merkleization") - } - fieldRoots[9] = eth1VotesRoot[:] - - // Eth1DepositIndex root. - eth1DepositIndexBuf := make([]byte, 8) - binary.LittleEndian.PutUint64(eth1DepositIndexBuf, state.Eth1DepositIndex) - eth1DepositBuf := bytesutil.ToBytes32(eth1DepositIndexBuf) - fieldRoots[10] = eth1DepositBuf[:] - - // Validators slice root. - validatorsRoot, err := h.validatorRegistryRoot(state.Validators) - if err != nil { - return nil, errors.Wrap(err, "could not compute validator registry merkleization") - } - fieldRoots[11] = validatorsRoot[:] - - // Balances slice root. - balancesRoot, err := stateutil.Uint64ListRootWithRegistryLimit(state.Balances) - if err != nil { - return nil, errors.Wrap(err, "could not compute validator balances merkleization") - } - fieldRoots[12] = balancesRoot[:] - - // RandaoMixes array root. - randaoRootsRoot, err := h.arraysRoot(state.RandaoMixes, uint64(params.BeaconConfig().EpochsPerHistoricalVector), "RandaoMixes") - if err != nil { - return nil, errors.Wrap(err, "could not compute randao roots merkleization") - } - fieldRoots[13] = randaoRootsRoot[:] - - // Slashings array root. - slashingsRootsRoot, err := ssz.SlashingsRoot(state.Slashings) - if err != nil { - return nil, errors.Wrap(err, "could not compute slashings merkleization") - } - fieldRoots[14] = slashingsRootsRoot[:] - - // PreviousEpochParticipation slice root. - prevParticipationRoot, err := stateutil.ParticipationBitsRoot(state.PreviousEpochParticipation) - if err != nil { - return nil, errors.Wrap(err, "could not compute previous epoch participation merkleization") - } - fieldRoots[15] = prevParticipationRoot[:] - - // CurrentEpochParticipation slice root. 
- currParticipationRoot, err := stateutil.ParticipationBitsRoot(state.CurrentEpochParticipation) - if err != nil { - return nil, errors.Wrap(err, "could not compute current epoch participation merkleization") - } - fieldRoots[16] = currParticipationRoot[:] - - // JustificationBits root. - justifiedBitsRoot := bytesutil.ToBytes32(state.JustificationBits) - fieldRoots[17] = justifiedBitsRoot[:] - - // PreviousJustifiedCheckpoint data structure root. - prevCheckRoot, err := ssz.CheckpointRoot(hasher, state.PreviousJustifiedCheckpoint) - if err != nil { - return nil, errors.Wrap(err, "could not compute previous justified checkpoint merkleization") - } - fieldRoots[18] = prevCheckRoot[:] - - // CurrentJustifiedCheckpoint data structure root. - currJustRoot, err := ssz.CheckpointRoot(hasher, state.CurrentJustifiedCheckpoint) - if err != nil { - return nil, errors.Wrap(err, "could not compute current justified checkpoint merkleization") - } - fieldRoots[19] = currJustRoot[:] - - // FinalizedCheckpoint data structure root. - finalRoot, err := ssz.CheckpointRoot(hasher, state.FinalizedCheckpoint) - if err != nil { - return nil, errors.Wrap(err, "could not compute finalized checkpoint merkleization") - } - fieldRoots[20] = finalRoot[:] - - // Inactivity scores root. - inactivityScoresRoot, err := stateutil.Uint64ListRootWithRegistryLimit(state.InactivityScores) - if err != nil { - return nil, errors.Wrap(err, "could not compute inactivityScoreRoot") - } - fieldRoots[21] = inactivityScoresRoot[:] - - // Current sync committee root. - currentSyncCommitteeRoot, err := stateutil.SyncCommitteeRoot(state.CurrentSyncCommittee) - if err != nil { - return nil, errors.Wrap(err, "could not compute sync committee merkleization") - } - fieldRoots[22] = currentSyncCommitteeRoot[:] - - // Next sync committee root. 
- nextSyncCommitteeRoot, err := stateutil.SyncCommitteeRoot(state.NextSyncCommittee) - if err != nil { - return nil, errors.Wrap(err, "could not compute sync committee merkleization") - } - fieldRoots[23] = nextSyncCommitteeRoot[:] - - // Execution payload root. - executionPayloadRoot, err := state.LatestExecutionPayloadHeader.HashTreeRoot() - if err != nil { - return nil, err - } - fieldRoots[24] = executionPayloadRoot[:] - - return fieldRoots, nil + return stateutil.NocachedHasher.ComputeFieldRootsWithHasherMerge(ctx, state) } diff --git a/beacon-chain/state/v3/state_trie.go b/beacon-chain/state/v3/state_trie.go index 813d27c0c2..c34571f85f 100644 --- a/beacon-chain/state/v3/state_trie.go +++ b/beacon-chain/state/v3/state_trie.go @@ -296,7 +296,7 @@ func (b *BeaconState) rootSelector(ctx context.Context, field types.FieldIndex) case historicalRoots: return ssz.ByteArrayRootWithLimit(b.state.HistoricalRoots, params.BeaconConfig().HistoricalRootsLimit) case eth1Data: - return eth1Root(hasher, b.state.Eth1Data) + return stateutil.Eth1Root(hasher, b.state.Eth1Data) case eth1DataVotes: if b.rebuildTrie[field] { err := b.resetFieldTrie(field, b.state.Eth1DataVotes, uint64(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().EpochsPerEth1VotingPeriod)))) diff --git a/testing/util/BUILD.bazel b/testing/util/BUILD.bazel index 4035f60600..ca54c0d0f4 100644 --- a/testing/util/BUILD.bazel +++ b/testing/util/BUILD.bazel @@ -27,6 +27,7 @@ go_library( "//beacon-chain/core/transition:go_default_library", "//beacon-chain/p2p/types:go_default_library", "//beacon-chain/state:go_default_library", + "//beacon-chain/state/stateutil:go_default_library", "//beacon-chain/state/v1:go_default_library", "//beacon-chain/state/v2:go_default_library", "//beacon-chain/state/v3:go_default_library", diff --git a/testing/util/altair.go b/testing/util/altair.go index 4940685806..dea3ed097b 100644 --- a/testing/util/altair.go +++ b/testing/util/altair.go @@ -15,7 +15,7 @@ import ( 
"github.com/prysmaticlabs/prysm/beacon-chain/core/time" "github.com/prysmaticlabs/prysm/beacon-chain/core/transition" "github.com/prysmaticlabs/prysm/beacon-chain/state" - v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1" + "github.com/prysmaticlabs/prysm/beacon-chain/state/stateutil" stateAltair "github.com/prysmaticlabs/prysm/beacon-chain/state/v2" "github.com/prysmaticlabs/prysm/config/params" "github.com/prysmaticlabs/prysm/crypto/bls" @@ -112,7 +112,7 @@ func buildGenesisBeaconState(genesisTime uint64, preState state.BeaconStateAltai slashings := make([]uint64, params.BeaconConfig().EpochsPerSlashingsVector) - genesisValidatorsRoot, err := v1.ValidatorRegistryRoot(preState.Validators()) + genesisValidatorsRoot, err := stateutil.ValidatorRegistryRoot(preState.Validators()) if err != nil { return nil, errors.Wrapf(err, "could not hash tree root genesis validators %v", err) } From d8aa0f8827eb9e06c4a2632f0c50b30cd9234c55 Mon Sep 17 00:00:00 2001 From: terence tsao Date: Mon, 29 Nov 2021 19:27:26 -0800 Subject: [PATCH 29/45] Alter config filed name to devnet if it's not populated in file (#9949) --- config/params/loader.go | 9 ++++++++- config/params/loader_test.go | 1 + 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/config/params/loader.go b/config/params/loader.go index 1324309593..2ae2e6f493 100644 --- a/config/params/loader.go +++ b/config/params/loader.go @@ -20,6 +20,8 @@ func LoadChainConfigFile(chainConfigFileName string) { } // Default to using mainnet. conf := MainnetConfig().Copy() + // To track if config name is defined inside config file. 
+ hasConfigName := false // Convert 0x hex inputs to fixed bytes arrays lines := strings.Split(string(yamlFile), "\n") for i, line := range lines { @@ -27,10 +29,12 @@ func LoadChainConfigFile(chainConfigFileName string) { if strings.HasPrefix(line, "DEPOSIT_CONTRACT_ADDRESS") { continue } + if strings.HasPrefix(line, "CONFIG_NAME") { + hasConfigName = true + } if strings.HasPrefix(line, "PRESET_BASE: 'minimal'") || strings.HasPrefix(line, "# Minimal preset") { conf = MinimalSpecConfig().Copy() } - if !strings.HasPrefix(line, "#") && strings.Contains(line, "0x") { parts := ReplaceHexStringWithYAMLFormat(line) lines[i] = strings.Join(parts, "\n") @@ -44,6 +48,9 @@ func LoadChainConfigFile(chainConfigFileName string) { log.WithError(err).Error("There were some issues parsing the config from a yaml file") } } + if !hasConfigName { + conf.ConfigName = "devnet" + } // recompute SqrRootSlotsPerEpoch constant to handle non-standard values of SlotsPerEpoch conf.SqrRootSlotsPerEpoch = types.Slot(math.IntegerSquareRoot(uint64(conf.SlotsPerEpoch))) log.Debugf("Config file values: %+v", conf) diff --git a/config/params/loader_test.go b/config/params/loader_test.go index 0a5dd856e6..a4688083f2 100644 --- a/config/params/loader_test.go +++ b/config/params/loader_test.go @@ -146,6 +146,7 @@ func TestLoadConfigFile_OverwriteCorrectly(t *testing.T) { params.MainnetConfig().SlotsPerEpoch, params.BeaconConfig().SlotsPerEpoch) } + require.Equal(t, "devnet", params.BeaconConfig().ConfigName) } func Test_replaceHexStringWithYAMLFormat(t *testing.T) { From 71d55d1cffeb4b9161f9bf77a37ad99b26765fe9 Mon Sep 17 00:00:00 2001 From: Potuz Date: Tue, 30 Nov 2021 07:40:59 -0300 Subject: [PATCH 30/45] Check for syncstatus before performing a voluntary exit (#9951) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: RadosÅ‚aw Kapka --- validator/accounts/BUILD.bazel | 1 + validator/accounts/accounts_exit.go | 15 +++++++++++++++ 2 files changed, 
16 insertions(+) diff --git a/validator/accounts/BUILD.bazel b/validator/accounts/BUILD.bazel index 4da748bce8..c4be461500 100644 --- a/validator/accounts/BUILD.bazel +++ b/validator/accounts/BUILD.bazel @@ -54,6 +54,7 @@ go_library( "@com_github_urfave_cli_v2//:go_default_library", "@com_github_wealdtech_go_eth2_wallet_encryptor_keystorev4//:go_default_library", "@org_golang_google_grpc//:go_default_library", + "@org_golang_google_protobuf//types/known/emptypb:go_default_library", ], ) diff --git a/validator/accounts/accounts_exit.go b/validator/accounts/accounts_exit.go index 8b6e1217fb..f20c4d1b72 100644 --- a/validator/accounts/accounts_exit.go +++ b/validator/accounts/accounts_exit.go @@ -24,6 +24,7 @@ import ( "github.com/prysmaticlabs/prysm/validator/keymanager" "github.com/urfave/cli/v2" "google.golang.org/grpc" + "google.golang.org/protobuf/types/known/emptypb" ) // PerformExitCfg for account voluntary exits. @@ -57,6 +58,20 @@ func ExitAccountsCli(cliCtx *cli.Context, r io.Reader) error { if err != nil { return err } + if nodeClient == nil { + return errors.New("Could not prepare beacon node client") + } + syncStatus, err := (*nodeClient).GetSyncStatus(cliCtx.Context, &emptypb.Empty{}) + if err != nil { + return err + } + if syncStatus == nil { + return errors.New("Could not get sync status") + } + + if (*syncStatus).Syncing { + return errors.New("Could not perform exit: beacon node is syncing.") + } cfg := PerformExitCfg{ *validatorClient, From 0e729389141e097c09f2459c92a46ee111ae5983 Mon Sep 17 00:00:00 2001 From: terence tsao Date: Tue, 30 Nov 2021 07:41:07 -0800 Subject: [PATCH 31/45] Uncap error messages (#9952) --- validator/accounts/accounts_exit.go | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/validator/accounts/accounts_exit.go b/validator/accounts/accounts_exit.go index f20c4d1b72..b69530e794 100644 --- a/validator/accounts/accounts_exit.go +++ b/validator/accounts/accounts_exit.go @@ -59,18 +59,18 @@ func 
ExitAccountsCli(cliCtx *cli.Context, r io.Reader) error { return err } if nodeClient == nil { - return errors.New("Could not prepare beacon node client") + return errors.New("could not prepare beacon node client") } syncStatus, err := (*nodeClient).GetSyncStatus(cliCtx.Context, &emptypb.Empty{}) if err != nil { return err } if syncStatus == nil { - return errors.New("Could not get sync status") + return errors.New("could not get sync status") } if (*syncStatus).Syncing { - return errors.New("Could not perform exit: beacon node is syncing.") + return errors.New("could not perform exit: beacon node is syncing.") } cfg := PerformExitCfg{ From 2c921ec62843c219d576be0b7f991fa373d6ffa2 Mon Sep 17 00:00:00 2001 From: terence tsao Date: Tue, 30 Nov 2021 13:21:59 -0800 Subject: [PATCH 32/45] Update spec tests to v1.1.6 (#9955) * Update spec test to v1.1.6 * Update spec test to v1.1.6 --- WORKSPACE | 10 +++++----- config/params/loader_test.go | 2 +- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/WORKSPACE b/WORKSPACE index 4deb6434cc..346e6a741f 100644 --- a/WORKSPACE +++ b/WORKSPACE @@ -225,7 +225,7 @@ filegroup( url = "https://github.com/eth2-clients/slashing-protection-interchange-tests/archive/b8413ca42dc92308019d0d4db52c87e9e125c4e9.tar.gz", ) -consensus_spec_version = "v1.1.5" +consensus_spec_version = "v1.1.6" bls_test_version = "v0.1.1" @@ -241,7 +241,7 @@ filegroup( visibility = ["//visibility:public"], ) """, - sha256 = "a7d7173d953494c0dfde432c9fc064c25d46d666b024749b3474ae0cdfc50050", + sha256 = "58dbf798e86017b5561af38f2217b99e9fa5b6be0e928b4c73dad6040bb94d65", url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/general.tar.gz" % consensus_spec_version, ) @@ -257,7 +257,7 @@ filegroup( visibility = ["//visibility:public"], ) """, - sha256 = "f86872061588c0197516b23025d39e9365b4716c112218a618739dc0d6f4666a", + sha256 = "5be19f7fca9733686ca25dad5ae306327e98830ef6354549d1ddfc56c10e0e9a", url = 
"https://github.com/ethereum/consensus-spec-tests/releases/download/%s/minimal.tar.gz" % consensus_spec_version, ) @@ -273,7 +273,7 @@ filegroup( visibility = ["//visibility:public"], ) """, - sha256 = "7a06975360fd37fbb4694d0e06abb78d2a0835146c1d9b26d33569edff8b98f0", + sha256 = "cc110528fcf7ede049e6a05788c77f4a865c3110b49508149d61bb2a992bb896", url = "https://github.com/ethereum/consensus-spec-tests/releases/download/%s/mainnet.tar.gz" % consensus_spec_version, ) @@ -288,7 +288,7 @@ filegroup( visibility = ["//visibility:public"], ) """, - sha256 = "87d8089200163340484d61212fbdffbb5d9d03e1244622761dcb91e641a65761", + sha256 = "c318d7b909ab39db9cc861f645ddd364e7475a4a3425bb702ab407fad3807acd", strip_prefix = "consensus-specs-" + consensus_spec_version[1:], url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version, ) diff --git a/config/params/loader_test.go b/config/params/loader_test.go index a4688083f2..9cf57f6acf 100644 --- a/config/params/loader_test.go +++ b/config/params/loader_test.go @@ -14,7 +14,7 @@ import ( "gopkg.in/yaml.v2" ) -var placeholderFields []string +var placeholderFields = []string{"PROPOSER_SCORE_BOOST"} func TestLoadConfigFileMainnet(t *testing.T) { // See https://media.githubusercontent.com/media/ethereum/consensus-spec-tests/master/tests/minimal/config/phase0.yaml From afbe02697da33c2ad752486d5e09cf1d3223c10c Mon Sep 17 00:00:00 2001 From: Potuz Date: Tue, 30 Nov 2021 19:27:03 -0300 Subject: [PATCH 33/45] Monitor service (#9933) * Add a service for the monitor * Do not block service start * gaz * move channel subscription outide go routine * add service start test * fix panic on node tests * Radek's first pass * Radek's take 2 * uncap error messages * revert reversal * Terence take 1 * gaz * Missing locks found by Terence * Track via bool not empty interface * Add tests for every function * fix allocation of slice * Minor cleanups Co-authored-by: terence tsao --- 
beacon-chain/monitor/BUILD.bazel | 9 + .../monitor/process_attestation_test.go | 49 --- beacon-chain/monitor/process_block_test.go | 8 +- beacon-chain/monitor/process_exit_test.go | 24 +- beacon-chain/monitor/service.go | 229 +++++++++++++- beacon-chain/monitor/service_test.go | 285 +++++++++++++++++- beacon-chain/node/BUILD.bazel | 1 + beacon-chain/node/node.go | 36 +++ cmd/beacon-chain/main.go | 1 + cmd/beacon-chain/usage.go | 1 + cmd/flags.go | 7 + cmd/wrap_flags.go | 2 + 12 files changed, 569 insertions(+), 83 deletions(-) diff --git a/beacon-chain/monitor/BUILD.bazel b/beacon-chain/monitor/BUILD.bazel index a1fdf5be85..ebd0cac455 100644 --- a/beacon-chain/monitor/BUILD.bazel +++ b/beacon-chain/monitor/BUILD.bazel @@ -14,8 +14,13 @@ go_library( importpath = "github.com/prysmaticlabs/prysm/beacon-chain/monitor", visibility = ["//beacon-chain:__subpackages__"], deps = [ + "//async/event:go_default_library", + "//beacon-chain/blockchain:go_default_library", "//beacon-chain/core/altair:go_default_library", "//beacon-chain/core/blocks:go_default_library", + "//beacon-chain/core/feed:go_default_library", + "//beacon-chain/core/feed/operation:go_default_library", + "//beacon-chain/core/feed/state:go_default_library", "//beacon-chain/core/helpers:go_default_library", "//beacon-chain/state:go_default_library", "//beacon-chain/state/stategen:go_default_library", @@ -44,7 +49,10 @@ go_test( ], embed = [":go_default_library"], deps = [ + "//beacon-chain/blockchain/testing:go_default_library", "//beacon-chain/core/altair:go_default_library", + "//beacon-chain/core/feed:go_default_library", + "//beacon-chain/core/feed/state:go_default_library", "//beacon-chain/db/testing:go_default_library", "//beacon-chain/state/stategen:go_default_library", "//config/params:go_default_library", @@ -53,6 +61,7 @@ go_test( "//proto/prysm/v1alpha1/wrapper:go_default_library", "//testing/require:go_default_library", "//testing/util:go_default_library", + "//time/slots:go_default_library", 
"@com_github_prysmaticlabs_eth2_types//:go_default_library", "@com_github_prysmaticlabs_go_bitfield//:go_default_library", "@com_github_sirupsen_logrus//:go_default_library", diff --git a/beacon-chain/monitor/process_attestation_test.go b/beacon-chain/monitor/process_attestation_test.go index 6aca6b2896..e6c9b657f7 100644 --- a/beacon-chain/monitor/process_attestation_test.go +++ b/beacon-chain/monitor/process_attestation_test.go @@ -5,10 +5,7 @@ import ( "context" "testing" - types "github.com/prysmaticlabs/eth2-types" "github.com/prysmaticlabs/go-bitfield" - testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing" - "github.com/prysmaticlabs/prysm/beacon-chain/state/stategen" "github.com/prysmaticlabs/prysm/encoding/bytesutil" ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper" @@ -18,52 +15,6 @@ import ( logTest "github.com/sirupsen/logrus/hooks/test" ) -func setupService(t *testing.T) *Service { - beaconDB := testDB.SetupDB(t) - - trackedVals := map[types.ValidatorIndex]interface{}{ - 1: nil, - 2: nil, - 12: nil, - 15: nil, - } - latestPerformance := map[types.ValidatorIndex]ValidatorLatestPerformance{ - 1: { - balance: 32000000000, - }, - 2: { - balance: 32000000000, - }, - 12: { - balance: 31900000000, - }, - 15: { - balance: 31900000000, - }, - } - aggregatedPerformance := map[types.ValidatorIndex]ValidatorAggregatedPerformance{ - 1: {}, - 2: {}, - 12: {}, - 15: {}, - } - trackedSyncCommitteeIndices := map[types.ValidatorIndex][]types.CommitteeIndex{ - 1: {0, 1, 2, 3}, - 12: {4, 5}, - } - return &Service{ - config: &ValidatorMonitorConfig{ - StateGen: stategen.New(beaconDB), - }, - - TrackedValidators: trackedVals, - latestPerformance: latestPerformance, - aggregatedPerformance: aggregatedPerformance, - trackedSyncCommitteeIndices: trackedSyncCommitteeIndices, - lastSyncedEpoch: 0, - } -} - func TestGetAttestingIndices(t *testing.T) { ctx := context.Background() beaconState, _ := 
util.DeterministicGenesisState(t, 256) diff --git a/beacon-chain/monitor/process_block_test.go b/beacon-chain/monitor/process_block_test.go index 73d31108f0..568708df99 100644 --- a/beacon-chain/monitor/process_block_test.go +++ b/beacon-chain/monitor/process_block_test.go @@ -117,9 +117,9 @@ func TestProcessSlashings(t *testing.T) { t.Run(tt.name, func(t *testing.T) { hook := logTest.NewGlobal() s := &Service{ - TrackedValidators: map[types.ValidatorIndex]interface{}{ - 1: nil, - 2: nil, + TrackedValidators: map[types.ValidatorIndex]bool{ + 1: true, + 2: true, }, } s.processSlashings(wrapper.WrappedPhase0BeaconBlock(tt.block)) @@ -205,7 +205,7 @@ func TestProcessBlock_AllEventsTrackedVals(t *testing.T) { idx := b.Block.Body.ProposerSlashings[0].Header_1.Header.ProposerIndex s.RLock() if !s.trackedIndex(idx) { - s.TrackedValidators[idx] = nil + s.TrackedValidators[idx] = true s.latestPerformance[idx] = ValidatorLatestPerformance{ balance: 31900000000, } diff --git a/beacon-chain/monitor/process_exit_test.go b/beacon-chain/monitor/process_exit_test.go index 09f7b6e371..63e88bfa4c 100644 --- a/beacon-chain/monitor/process_exit_test.go +++ b/beacon-chain/monitor/process_exit_test.go @@ -13,9 +13,9 @@ import ( func TestProcessExitsFromBlockTrackedIndices(t *testing.T) { hook := logTest.NewGlobal() s := &Service{ - TrackedValidators: map[types.ValidatorIndex]interface{}{ - 1: nil, - 2: nil, + TrackedValidators: map[types.ValidatorIndex]bool{ + 1: true, + 2: true, }, } @@ -47,9 +47,9 @@ func TestProcessExitsFromBlockTrackedIndices(t *testing.T) { func TestProcessExitsFromBlockUntrackedIndices(t *testing.T) { hook := logTest.NewGlobal() s := &Service{ - TrackedValidators: map[types.ValidatorIndex]interface{}{ - 1: nil, - 2: nil, + TrackedValidators: map[types.ValidatorIndex]bool{ + 1: true, + 2: true, }, } @@ -81,9 +81,9 @@ func TestProcessExitsFromBlockUntrackedIndices(t *testing.T) { func TestProcessExitP2PTrackedIndices(t *testing.T) { hook := logTest.NewGlobal() s := 
&Service{ - TrackedValidators: map[types.ValidatorIndex]interface{}{ - 1: nil, - 2: nil, + TrackedValidators: map[types.ValidatorIndex]bool{ + 1: true, + 2: true, }, } @@ -101,9 +101,9 @@ func TestProcessExitP2PTrackedIndices(t *testing.T) { func TestProcessExitP2PUntrackedIndices(t *testing.T) { hook := logTest.NewGlobal() s := &Service{ - TrackedValidators: map[types.ValidatorIndex]interface{}{ - 1: nil, - 2: nil, + TrackedValidators: map[types.ValidatorIndex]bool{ + 1: true, + 2: true, }, } diff --git a/beacon-chain/monitor/service.go b/beacon-chain/monitor/service.go index c04d32103d..a5fa5628ca 100644 --- a/beacon-chain/monitor/service.go +++ b/beacon-chain/monitor/service.go @@ -1,13 +1,30 @@ package monitor import ( + "context" + "errors" + "sort" "sync" types "github.com/prysmaticlabs/eth2-types" + "github.com/prysmaticlabs/prysm/async/event" + "github.com/prysmaticlabs/prysm/beacon-chain/blockchain" + "github.com/prysmaticlabs/prysm/beacon-chain/core/feed" + "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/operation" + statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state" "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers" "github.com/prysmaticlabs/prysm/beacon-chain/state" "github.com/prysmaticlabs/prysm/beacon-chain/state/stategen" "github.com/prysmaticlabs/prysm/time/slots" + "github.com/sirupsen/logrus" +) + +var ( + // Error when event feed data is not statefeed.SyncedData. + errNotSyncedData = errors.New("event feed data is not of type *statefeed.SyncedData") + + // Error when the context is closed while waiting for sync. 
+ errContextClosedWhileWaiting = errors.New("context closed while waiting for beacon to sync to latest Head") ) // ValidatorLatestPerformance keeps track of the latest participation of the validator @@ -24,6 +41,8 @@ type ValidatorLatestPerformance struct { // ValidatorAggregatedPerformance keeps track of the accumulated performance of // the validator since launch type ValidatorAggregatedPerformance struct { + startEpoch types.Epoch + startBalance uint64 totalAttestedCount uint64 totalRequestedCount uint64 totalDistance uint64 @@ -40,25 +59,229 @@ type ValidatorAggregatedPerformance struct { // monitor service tracks, as well as the event feed notifier that the // monitor needs to subscribe. type ValidatorMonitorConfig struct { - StateGen stategen.StateManager + StateNotifier statefeed.Notifier + AttestationNotifier operation.Notifier + HeadFetcher blockchain.HeadFetcher + StateGen stategen.StateManager } // Service is the main structure that tracks validators and reports logs and // metrics of their performances throughout their lifetime. type Service struct { - config *ValidatorMonitorConfig + config *ValidatorMonitorConfig + ctx context.Context + cancel context.CancelFunc + isLogging bool // Locks access to TrackedValidators, latestPerformance, aggregatedPerformance, // trackedSyncedCommitteeIndices and lastSyncedEpoch sync.RWMutex - TrackedValidators map[types.ValidatorIndex]interface{} + TrackedValidators map[types.ValidatorIndex]bool latestPerformance map[types.ValidatorIndex]ValidatorLatestPerformance aggregatedPerformance map[types.ValidatorIndex]ValidatorAggregatedPerformance trackedSyncCommitteeIndices map[types.ValidatorIndex][]types.CommitteeIndex lastSyncedEpoch types.Epoch } +// NewService sets up a new validator monitor instance when given a list of validator indices to track. 
+func NewService(ctx context.Context, config *ValidatorMonitorConfig, tracked []types.ValidatorIndex) (*Service, error) { + ctx, cancel := context.WithCancel(ctx) + r := &Service{ + config: config, + ctx: ctx, + cancel: cancel, + TrackedValidators: make(map[types.ValidatorIndex]bool, len(tracked)), + latestPerformance: make(map[types.ValidatorIndex]ValidatorLatestPerformance), + aggregatedPerformance: make(map[types.ValidatorIndex]ValidatorAggregatedPerformance), + trackedSyncCommitteeIndices: make(map[types.ValidatorIndex][]types.CommitteeIndex), + } + for _, idx := range tracked { + r.TrackedValidators[idx] = true + } + return r, nil +} + +// Start sets up the TrackedValidators map and then calls to wait until the beacon is synced. +func (s *Service) Start() { + s.Lock() + defer s.Unlock() + + tracked := make([]types.ValidatorIndex, 0, len(s.TrackedValidators)) + for idx := range s.TrackedValidators { + tracked = append(tracked, idx) + } + sort.Slice(tracked, func(i, j int) bool { return tracked[i] < tracked[j] }) + + log.WithFields(logrus.Fields{ + "ValidatorIndices": tracked, + }).Info("Starting service") + + s.isLogging = false + stateChannel := make(chan *feed.Event, 1) + stateSub := s.config.StateNotifier.StateFeed().Subscribe(stateChannel) + + go s.run(stateChannel, stateSub) +} + +// run waits until the beacon is synced and starts the monitoring system. 
+func (s *Service) run(stateChannel chan *feed.Event, stateSub event.Subscription) { + if stateChannel == nil { + log.Error("State state is nil") + return + } + + if err := s.waitForSync(stateChannel, stateSub); err != nil { + log.WithError(err) + return + } + state, err := s.config.HeadFetcher.HeadState(s.ctx) + if err != nil { + log.WithError(err).Error("Could not get head state") + return + } + if state == nil { + log.Error("Head state is nil") + return + } + + epoch := slots.ToEpoch(state.Slot()) + log.WithField("Epoch", epoch).Info("Synced to head epoch, starting reporting performance") + + s.Lock() + s.initializePerformanceStructures(state, epoch) + s.Unlock() + + s.updateSyncCommitteeTrackedVals(state) + + s.Lock() + s.isLogging = true + s.Unlock() + + s.monitorRoutine(stateChannel, stateSub) +} + +// initializePerformanceStructures initializes the validatorLatestPerformance +// and validatorAggregatedPerformance for each tracked validator. +func (s *Service) initializePerformanceStructures(state state.BeaconState, epoch types.Epoch) { + for idx := range s.TrackedValidators { + balance, err := state.BalanceAtIndex(idx) + if err != nil { + log.WithError(err).WithField("ValidatorIndex", idx).Error( + "Could not fetch starting balance, skipping aggregated logs.") + balance = 0 + } + s.aggregatedPerformance[idx] = ValidatorAggregatedPerformance{ + startEpoch: epoch, + startBalance: balance, + } + s.latestPerformance[idx] = ValidatorLatestPerformance{ + balance: balance, + } + } +} + +// Status retrieves the status of the service. +func (s *Service) Status() error { + if s.isLogging { + return nil + } + return errors.New("not running") +} + +// Stop stops the service. +func (s *Service) Stop() error { + defer s.cancel() + s.isLogging = false + return nil +} + +// waitForSync waits until the beacon node is synced to the latest head. 
+func (s *Service) waitForSync(stateChannel chan *feed.Event, stateSub event.Subscription) error { + for { + select { + case event := <-stateChannel: + if event.Type == statefeed.Synced { + _, ok := event.Data.(*statefeed.SyncedData) + if !ok { + return errNotSyncedData + } + return nil + } + case <-s.ctx.Done(): + log.Debug("Context closed, exiting goroutine") + return errContextClosedWhileWaiting + case err := <-stateSub.Err(): + log.WithError(err).Error("Could not subscribe to state notifier") + return err + } + } +} + +// monitorRoutine is the main dispatcher, it registers event channels for the +// state feed and the operation feed. It then calls the appropriate function +// when we get messages after syncing a block or processing attestations/sync +// committee contributions. +func (s *Service) monitorRoutine(stateChannel chan *feed.Event, stateSub event.Subscription) { + defer stateSub.Unsubscribe() + + opChannel := make(chan *feed.Event, 1) + opSub := s.config.AttestationNotifier.OperationFeed().Subscribe(opChannel) + defer opSub.Unsubscribe() + + for { + select { + case event := <-stateChannel: + if event.Type == statefeed.BlockProcessed { + data, ok := event.Data.(*statefeed.BlockProcessedData) + if !ok { + log.Error("Event feed data is not of type *statefeed.BlockProcessedData") + } else if data.Verified { + // We only process blocks that have been verified + s.processBlock(s.ctx, data.SignedBlock) + } + } + case event := <-opChannel: + switch event.Type { + case operation.UnaggregatedAttReceived: + data, ok := event.Data.(*operation.UnAggregatedAttReceivedData) + if !ok { + log.Error("Event feed data is not of type *operation.UnAggregatedAttReceivedData") + } else { + s.processUnaggregatedAttestation(s.ctx, data.Attestation) + } + case operation.AggregatedAttReceived: + data, ok := event.Data.(*operation.AggregatedAttReceivedData) + if !ok { + log.Error("Event feed data is not of type *operation.AggregatedAttReceivedData") + } else { + 
s.processAggregatedAttestation(s.ctx, data.Attestation) + } + case operation.ExitReceived: + data, ok := event.Data.(*operation.ExitReceivedData) + if !ok { + log.Error("Event feed data is not of type *operation.ExitReceivedData") + } else { + s.processExit(data.Exit) + } + case operation.SyncCommitteeContributionReceived: + data, ok := event.Data.(*operation.SyncCommitteeContributionReceivedData) + if !ok { + log.Error("Event feed data is not of type *operation.SyncCommitteeContributionReceivedData") + } else { + s.processSyncCommitteeContribution(data.Contribution) + } + } + case <-s.ctx.Done(): + log.Debug("Context closed, exiting goroutine") + return + case err := <-stateSub.Err(): + log.WithError(err).Error("Could not subscribe to state notifier") + return + } + } +} + // TrackedIndex returns if the given validator index corresponds to one of the // validators we follow. // It assumes the caller holds the service Lock diff --git a/beacon-chain/monitor/service_test.go b/beacon-chain/monitor/service_test.go index 8c715cdc31..3d80b4ed6d 100644 --- a/beacon-chain/monitor/service_test.go +++ b/beacon-chain/monitor/service_test.go @@ -1,29 +1,30 @@ package monitor import ( + "context" + "fmt" + "sync" "testing" + "time" types "github.com/prysmaticlabs/eth2-types" + mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing" + "github.com/prysmaticlabs/prysm/beacon-chain/core/altair" + "github.com/prysmaticlabs/prysm/beacon-chain/core/feed" + statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state" + testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing" + "github.com/prysmaticlabs/prysm/beacon-chain/state/stategen" + "github.com/prysmaticlabs/prysm/encoding/bytesutil" + "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper" "github.com/prysmaticlabs/prysm/testing/require" "github.com/prysmaticlabs/prysm/testing/util" + "github.com/prysmaticlabs/prysm/time/slots" logTest "github.com/sirupsen/logrus/hooks/test" ) -func 
TestTrackedIndex(t *testing.T) { - s := &Service{ - TrackedValidators: map[types.ValidatorIndex]interface{}{ - 1: nil, - 2: nil, - }, - } - require.Equal(t, s.trackedIndex(types.ValidatorIndex(1)), true) - require.Equal(t, s.trackedIndex(types.ValidatorIndex(3)), false) -} - -func TestUpdateSyncCommitteeTrackedVals(t *testing.T) { - hook := logTest.NewGlobal() - s := setupService(t) - state, _ := util.DeterministicGenesisStateAltair(t, 1024) +func setupService(t *testing.T) *Service { + beaconDB := testDB.SetupDB(t) + state, _ := util.DeterministicGenesisStateAltair(t, 256) pubKeys := make([][]byte, 3) pubKeys[0] = state.Validators()[0].PublicKey @@ -35,6 +36,77 @@ func TestUpdateSyncCommitteeTrackedVals(t *testing.T) { }) require.NoError(t, state.SetCurrentSyncCommittee(currentSyncCommittee)) + chainService := &mock.ChainService{ + Genesis: time.Now(), + DB: beaconDB, + State: state, + Root: []byte("hello-world"), + ValidatorsRoot: [32]byte{}, + } + + trackedVals := map[types.ValidatorIndex]bool{ + 1: true, + 2: true, + 12: true, + 15: true, + } + latestPerformance := map[types.ValidatorIndex]ValidatorLatestPerformance{ + 1: { + balance: 32000000000, + }, + 2: { + balance: 32000000000, + }, + 12: { + balance: 31900000000, + }, + 15: { + balance: 31900000000, + }, + } + aggregatedPerformance := map[types.ValidatorIndex]ValidatorAggregatedPerformance{ + 1: {}, + 2: {}, + 12: {}, + 15: {}, + } + trackedSyncCommitteeIndices := map[types.ValidatorIndex][]types.CommitteeIndex{ + 1: {0, 1, 2, 3}, + 12: {4, 5}, + } + return &Service{ + config: &ValidatorMonitorConfig{ + StateGen: stategen.New(beaconDB), + StateNotifier: chainService.StateNotifier(), + HeadFetcher: chainService, + AttestationNotifier: chainService.OperationNotifier(), + }, + + ctx: context.Background(), + TrackedValidators: trackedVals, + latestPerformance: latestPerformance, + aggregatedPerformance: aggregatedPerformance, + trackedSyncCommitteeIndices: trackedSyncCommitteeIndices, + lastSyncedEpoch: 0, + 
} +} + +func TestTrackedIndex(t *testing.T) { + s := &Service{ + TrackedValidators: map[types.ValidatorIndex]bool{ + 1: true, + 2: true, + }, + } + require.Equal(t, s.trackedIndex(types.ValidatorIndex(1)), true) + require.Equal(t, s.trackedIndex(types.ValidatorIndex(3)), false) +} + +func TestUpdateSyncCommitteeTrackedVals(t *testing.T) { + hook := logTest.NewGlobal() + s := setupService(t) + state, _ := util.DeterministicGenesisStateAltair(t, 1024) + s.updateSyncCommitteeTrackedVals(state) require.LogsDoNotContain(t, hook, "Sync committee assignments will not be reported") newTrackedSyncIndices := map[types.ValidatorIndex][]types.CommitteeIndex{ @@ -43,3 +115,186 @@ func TestUpdateSyncCommitteeTrackedVals(t *testing.T) { } require.DeepEqual(t, s.trackedSyncCommitteeIndices, newTrackedSyncIndices) } + +func TestNewService(t *testing.T) { + config := &ValidatorMonitorConfig{} + tracked := []types.ValidatorIndex{} + ctx := context.Background() + _, err := NewService(ctx, config, tracked) + require.NoError(t, err) +} + +func TestStart(t *testing.T) { + hook := logTest.NewGlobal() + s := setupService(t) + stateChannel := make(chan *feed.Event, 1) + stateSub := s.config.StateNotifier.StateFeed().Subscribe(stateChannel) + defer stateSub.Unsubscribe() + + wg := &sync.WaitGroup{} + wg.Add(1) + s.Start() + + go func() { + select { + case stateEvent := <-stateChannel: + if stateEvent.Type == statefeed.Synced { + _, ok := stateEvent.Data.(*statefeed.SyncedData) + require.Equal(t, true, ok, "Event feed data is not type *statefeed.SyncedData") + } + case <-s.ctx.Done(): + } + wg.Done() + }() + + for sent := 0; sent == 0; { + sent = s.config.StateNotifier.StateFeed().Send(&feed.Event{ + Type: statefeed.Synced, + Data: &statefeed.SyncedData{ + StartTime: time.Now(), + }, + }) + } + + // wait for Logrus + time.Sleep(1000 * time.Millisecond) + require.LogsContain(t, hook, "Synced to head epoch, starting reporting performance") + require.LogsContain(t, hook, "\"Starting service\" 
ValidatorIndices=\"[1 2 12 15]\"") + require.Equal(t, s.isLogging, true, "monitor is not running") +} + +func TestInitializePerformanceStructures(t *testing.T) { + hook := logTest.NewGlobal() + ctx := context.Background() + s := setupService(t) + state, err := s.config.HeadFetcher.HeadState(ctx) + require.NoError(t, err) + epoch := slots.ToEpoch(state.Slot()) + s.initializePerformanceStructures(state, epoch) + require.LogsDoNotContain(t, hook, "Could not fetch starting balance") + latestPerformance := map[types.ValidatorIndex]ValidatorLatestPerformance{ + 1: { + balance: 32000000000, + }, + 2: { + balance: 32000000000, + }, + 12: { + balance: 32000000000, + }, + 15: { + balance: 32000000000, + }, + } + aggregatedPerformance := map[types.ValidatorIndex]ValidatorAggregatedPerformance{ + 1: { + startBalance: 32000000000, + }, + 2: { + startBalance: 32000000000, + }, + 12: { + startBalance: 32000000000, + }, + 15: { + startBalance: 32000000000, + }, + } + + require.DeepEqual(t, s.latestPerformance, latestPerformance) + require.DeepEqual(t, s.aggregatedPerformance, aggregatedPerformance) +} + +func TestMonitorRoutine(t *testing.T) { + ctx := context.Background() + hook := logTest.NewGlobal() + s := setupService(t) + stateChannel := make(chan *feed.Event, 1) + stateSub := s.config.StateNotifier.StateFeed().Subscribe(stateChannel) + + wg := &sync.WaitGroup{} + wg.Add(1) + + go func() { + s.monitorRoutine(stateChannel, stateSub) + wg.Done() + }() + + genesis, keys := util.DeterministicGenesisStateAltair(t, 64) + c, err := altair.NextSyncCommittee(ctx, genesis) + require.NoError(t, err) + require.NoError(t, genesis.SetCurrentSyncCommittee(c)) + + genConfig := util.DefaultBlockGenConfig() + block, err := util.GenerateFullBlockAltair(genesis, keys, genConfig, 1) + require.NoError(t, err) + root, err := block.GetBlock().HashTreeRoot() + require.NoError(t, err) + require.NoError(t, s.config.StateGen.SaveState(ctx, root, genesis)) + + wrapped, err := 
wrapper.WrappedAltairSignedBeaconBlock(block) + require.NoError(t, err) + + stateChannel <- &feed.Event{ + Type: statefeed.BlockProcessed, + Data: &statefeed.BlockProcessedData{ + Slot: 1, + Verified: true, + SignedBlock: wrapped, + }, + } + + // Wait for Logrus + time.Sleep(1000 * time.Millisecond) + wanted1 := fmt.Sprintf("\"Proposed block was included\" BalanceChange=100000000 BlockRoot=%#x NewBalance=32000000000 ParentRoot=0xf732eaeb7fae ProposerIndex=15 Slot=1 Version=1 prefix=monitor", bytesutil.Trunc(root[:])) + require.LogsContain(t, hook, wanted1) + +} + +func TestWaitForSync(t *testing.T) { + s := setupService(t) + stateChannel := make(chan *feed.Event, 1) + stateSub := s.config.StateNotifier.StateFeed().Subscribe(stateChannel) + defer stateSub.Unsubscribe() + + wg := &sync.WaitGroup{} + wg.Add(1) + + go func() { + err := s.waitForSync(stateChannel, stateSub) + require.NoError(t, err) + wg.Done() + }() + + stateChannel <- &feed.Event{ + Type: statefeed.Synced, + Data: &statefeed.SyncedData{ + StartTime: time.Now(), + }, + } +} + +func TestRun(t *testing.T) { + hook := logTest.NewGlobal() + s := setupService(t) + stateChannel := make(chan *feed.Event, 1) + stateSub := s.config.StateNotifier.StateFeed().Subscribe(stateChannel) + + wg := &sync.WaitGroup{} + wg.Add(1) + + go func() { + s.run(stateChannel, stateSub) + wg.Done() + }() + + stateChannel <- &feed.Event{ + Type: statefeed.Synced, + Data: &statefeed.SyncedData{ + StartTime: time.Now(), + }, + } + //wait for Logrus + time.Sleep(1000 * time.Millisecond) + require.LogsContain(t, hook, "Synced to head epoch, starting reporting performance") +} diff --git a/beacon-chain/node/BUILD.bazel b/beacon-chain/node/BUILD.bazel index 5924c17e92..03e29098b4 100644 --- a/beacon-chain/node/BUILD.bazel +++ b/beacon-chain/node/BUILD.bazel @@ -26,6 +26,7 @@ go_library( "//beacon-chain/forkchoice:go_default_library", "//beacon-chain/forkchoice/protoarray:go_default_library", "//beacon-chain/gateway:go_default_library", + 
"//beacon-chain/monitor:go_default_library", "//beacon-chain/node/registration:go_default_library", "//beacon-chain/operations/attestations:go_default_library", "//beacon-chain/operations/slashings:go_default_library", diff --git a/beacon-chain/node/node.go b/beacon-chain/node/node.go index 65e8553b9c..2aa2f2b4c2 100644 --- a/beacon-chain/node/node.go +++ b/beacon-chain/node/node.go @@ -17,6 +17,7 @@ import ( "github.com/ethereum/go-ethereum/common" "github.com/pkg/errors" + types "github.com/prysmaticlabs/eth2-types" apigateway "github.com/prysmaticlabs/prysm/api/gateway" "github.com/prysmaticlabs/prysm/async/event" "github.com/prysmaticlabs/prysm/beacon-chain/blockchain" @@ -28,6 +29,7 @@ import ( "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice" "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray" "github.com/prysmaticlabs/prysm/beacon-chain/gateway" + "github.com/prysmaticlabs/prysm/beacon-chain/monitor" "github.com/prysmaticlabs/prysm/beacon-chain/node/registration" "github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations" "github.com/prysmaticlabs/prysm/beacon-chain/operations/slashings" @@ -205,6 +207,10 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) { return nil, err } + if err := beacon.registerValidatorMonitorService(); err != nil { + return nil, err + } + if !cliCtx.Bool(cmd.DisableMonitoringFlag.Name) { if err := beacon.registerPrometheusService(cliCtx); err != nil { return nil, err @@ -866,3 +872,33 @@ func (b *BeaconNode) registerDeterminsticGenesisService() error { } return nil } + +func (b *BeaconNode) registerValidatorMonitorService() error { + if cmd.ValidatorMonitorIndicesFlag.Value == nil { + return nil + } + cliSlice := cmd.ValidatorMonitorIndicesFlag.Value.Value() + if cliSlice == nil { + return nil + } + tracked := make([]types.ValidatorIndex, len(cliSlice)) + for i := range tracked { + tracked[i] = types.ValidatorIndex(cliSlice[i]) + } + + var chainService *blockchain.Service + if err 
:= b.services.FetchService(&chainService); err != nil { + return err + } + monitorConfig := &monitor.ValidatorMonitorConfig{ + StateNotifier: b, + AttestationNotifier: b, + StateGen: b.stateGen, + HeadFetcher: chainService, + } + svc, err := monitor.NewService(b.ctx, monitorConfig, tracked) + if err != nil { + return err + } + return b.services.RegisterService(svc) +} diff --git a/cmd/beacon-chain/main.go b/cmd/beacon-chain/main.go index d299058c93..360e66a8fe 100644 --- a/cmd/beacon-chain/main.go +++ b/cmd/beacon-chain/main.go @@ -118,6 +118,7 @@ var appFlags = []cli.Flag{ cmd.RestoreSourceFileFlag, cmd.RestoreTargetDirFlag, cmd.BoltMMapInitialSizeFlag, + cmd.ValidatorMonitorIndicesFlag, } func init() { diff --git a/cmd/beacon-chain/usage.go b/cmd/beacon-chain/usage.go index 101358fffb..0610d68ad5 100644 --- a/cmd/beacon-chain/usage.go +++ b/cmd/beacon-chain/usage.go @@ -73,6 +73,7 @@ var appHelpFlagGroups = []flagGroup{ cmd.RestoreSourceFileFlag, cmd.RestoreTargetDirFlag, cmd.BoltMMapInitialSizeFlag, + cmd.ValidatorMonitorIndicesFlag, }, }, { diff --git a/cmd/flags.go b/cmd/flags.go index cf378012b7..1d8e34819e 100644 --- a/cmd/flags.go +++ b/cmd/flags.go @@ -224,6 +224,13 @@ var ( Name: "accept-terms-of-use", Usage: "Accept Terms and Conditions (for non-interactive environments)", } + // ValidatorMonitorIndicesFlag specifies a list of validator indices to + // track for performance updates + ValidatorMonitorIndicesFlag = &cli.IntSliceFlag{ + Name: "monitor-indices", + Usage: "List of validator indices to track performance", + } + // RestoreSourceFileFlag specifies the filepath to the backed-up database file // which will be used to restore the database. RestoreSourceFileFlag = &cli.StringFlag{ diff --git a/cmd/wrap_flags.go b/cmd/wrap_flags.go index 4bbdef3dca..166cd8f451 100644 --- a/cmd/wrap_flags.go +++ b/cmd/wrap_flags.go @@ -33,6 +33,8 @@ func WrapFlags(flags []cli.Flag) []cli.Flag { case *cli.Int64Flag: // Int64Flag does not work. 
See https://github.com/prysmaticlabs/prysm/issues/6478 panic(fmt.Sprintf("unsupported flag type type %T", f)) + case *cli.IntSliceFlag: + f = altsrc.NewIntSliceFlag(t) default: panic(fmt.Sprintf("cannot convert type %T", f)) } From cd6d0d9cf1d00f028b50ab80adfa68e6166d3a3e Mon Sep 17 00:00:00 2001 From: Potuz Date: Wed, 1 Dec 2021 00:35:55 -0300 Subject: [PATCH 34/45] Monitor aggregated logs (#9943) --- beacon-chain/monitor/process_block.go | 46 ++++++++++++++++++++++ beacon-chain/monitor/process_block_test.go | 15 +++++++ beacon-chain/monitor/service_test.go | 14 ++++++- 3 files changed, 74 insertions(+), 1 deletion(-) diff --git a/beacon-chain/monitor/process_block.go b/beacon-chain/monitor/process_block.go index c2aac89377..5803aabfb4 100644 --- a/beacon-chain/monitor/process_block.go +++ b/beacon-chain/monitor/process_block.go @@ -7,12 +7,16 @@ import ( types "github.com/prysmaticlabs/eth2-types" "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks" "github.com/prysmaticlabs/prysm/beacon-chain/state" + "github.com/prysmaticlabs/prysm/config/params" "github.com/prysmaticlabs/prysm/encoding/bytesutil" "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block" "github.com/prysmaticlabs/prysm/time/slots" "github.com/sirupsen/logrus" ) +// Number of epochs between aggregate reports +const AggregateReportingPeriod = 5 + // processBlock handles the cases when // 1) A block was proposed by one of our tracked validators // 2) An attestation by one of our tracked validators was included @@ -53,6 +57,10 @@ func (s *Service) processBlock(ctx context.Context, b block.SignedBeaconBlock) { s.processSyncAggregate(state, blk) s.processProposedBlock(state, root, blk) s.processAttestations(ctx, state, blk) + + if blk.Slot()%AggregateReportingPeriod*params.BeaconConfig().SlotsPerEpoch == 0 { + s.logAggregatedPerformance() + } } // processProposedBlock logs the event that one of our tracked validators proposed a block that was included @@ -129,3 +137,41 @@ func (s *Service) 
processSlashings(blk block.BeaconBlock) { } } } + +// logAggregatedPerformance logs the performance statistics collected since the run started +func (s *Service) logAggregatedPerformance() { + s.RLock() + defer s.RUnlock() + + for idx, p := range s.aggregatedPerformance { + if p.totalAttestedCount == 0 || p.totalRequestedCount == 0 || p.startBalance == 0 { + break + } + l, ok := s.latestPerformance[idx] + if !ok { + break + } + percentAtt := float64(p.totalAttestedCount) / float64(p.totalRequestedCount) + percentBal := float64(l.balance-p.startBalance) / float64(p.startBalance) + percentDistance := float64(p.totalDistance) / float64(p.totalAttestedCount) + percentCorrectSource := float64(p.totalCorrectSource) / float64(p.totalAttestedCount) + percentCorrectHead := float64(p.totalCorrectHead) / float64(p.totalAttestedCount) + percentCorrectTarget := float64(p.totalCorrectTarget) / float64(p.totalAttestedCount) + + log.WithFields(logrus.Fields{ + "ValidatorIndex": idx, + "StartEpoch": p.startEpoch, + "StartBalance": p.startBalance, + "TotalRequested": p.totalRequestedCount, + "AttestationInclusion": fmt.Sprintf("%.2f%%", percentAtt*100), + "BalanceChangePct": fmt.Sprintf("%.2f%%", percentBal*100), + "CorrectlyVotedSourcePct": fmt.Sprintf("%.2f%%", percentCorrectSource*100), + "CorrectlyVotedTargetPct": fmt.Sprintf("%.2f%%", percentCorrectTarget*100), + "CorrectlyVotedHeadPct": fmt.Sprintf("%.2f%%", percentCorrectHead*100), + "AverageInclusionDistance": fmt.Sprintf("%.1f", percentDistance), + "TotalProposedBlocks": p.totalProposedCount, + "TotalAggregations": p.totalAggregations, + "TotalSyncContributions": p.totalSyncComitteeContributions, + }).Info("Aggregated performance since launch") + } +} diff --git a/beacon-chain/monitor/process_block_test.go b/beacon-chain/monitor/process_block_test.go index 568708df99..2bd4270d6d 100644 --- a/beacon-chain/monitor/process_block_test.go +++ b/beacon-chain/monitor/process_block_test.go @@ -4,6 +4,7 @@ import ( "context" "fmt" 
"testing" + "time" types "github.com/prysmaticlabs/eth2-types" "github.com/prysmaticlabs/prysm/beacon-chain/core/altair" @@ -229,3 +230,17 @@ func TestProcessBlock_AllEventsTrackedVals(t *testing.T) { require.LogsContain(t, hook, wanted3) require.LogsContain(t, hook, wanted4) } + +func TestLogAggregatedPerformance(t *testing.T) { + hook := logTest.NewGlobal() + s := setupService(t) + + s.logAggregatedPerformance() + time.Sleep(3000 * time.Millisecond) + wanted := "\"Aggregated performance since launch\" AttestationInclusion=\"80.00%\"" + + " AverageInclusionDistance=1.2 BalanceChangePct=\"0.95%\" CorrectlyVotedHeadPct=\"66.67%\" " + + "CorrectlyVotedSourcePct=\"91.67%\" CorrectlyVotedTargetPct=\"100.00%\" StartBalance=31700000000 " + + "StartEpoch=0 TotalAggregations=0 TotalProposedBlocks=1 TotalRequested=15 TotalSyncContributions=0 " + + "ValidatorIndex=1 prefix=monitor" + require.LogsContain(t, hook, wanted) +} diff --git a/beacon-chain/monitor/service_test.go b/beacon-chain/monitor/service_test.go index 3d80b4ed6d..2ca5655b26 100644 --- a/beacon-chain/monitor/service_test.go +++ b/beacon-chain/monitor/service_test.go @@ -65,7 +65,19 @@ func setupService(t *testing.T) *Service { }, } aggregatedPerformance := map[types.ValidatorIndex]ValidatorAggregatedPerformance{ - 1: {}, + 1: { + startEpoch: 0, + startBalance: 31700000000, + totalAttestedCount: 12, + totalRequestedCount: 15, + totalDistance: 14, + totalCorrectHead: 8, + totalCorrectSource: 11, + totalCorrectTarget: 12, + totalProposedCount: 1, + totalSyncComitteeContributions: 0, + totalSyncComitteeAggregations: 0, + }, 2: {}, 12: {}, 15: {}, From 7cbef104b0629387b3d920741d9d5c0e6f046fe7 Mon Sep 17 00:00:00 2001 From: Nishant Das Date: Wed, 1 Dec 2021 12:03:26 +0800 Subject: [PATCH 35/45] Remove Balances Timeout (#9957) Co-authored-by: Raul Jordan --- beacon-chain/rpc/prysm/v1alpha1/beacon/validators.go | 6 ------ 1 file changed, 6 deletions(-) diff --git a/beacon-chain/rpc/prysm/v1alpha1/beacon/validators.go 
b/beacon-chain/rpc/prysm/v1alpha1/beacon/validators.go index af0460c36e..9eb6de56c8 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/beacon/validators.go +++ b/beacon-chain/rpc/prysm/v1alpha1/beacon/validators.go @@ -4,7 +4,6 @@ import ( "context" "sort" "strconv" - "time" types "github.com/prysmaticlabs/eth2-types" "github.com/prysmaticlabs/prysm/api/pagination" @@ -27,9 +26,6 @@ import ( "google.golang.org/protobuf/types/known/emptypb" ) -// BalancesTimeout for gRPC requests to ListValidatorBalances. -const BalancesTimeout = time.Second * 30 - // ListValidatorBalances retrieves the validator balances for a given set of public keys. // An optional Epoch parameter is provided to request historical validator balances from // archived, persistent data. @@ -37,8 +33,6 @@ func (bs *Server) ListValidatorBalances( ctx context.Context, req *ethpb.ListValidatorBalancesRequest, ) (*ethpb.ValidatorBalances, error) { - ctx, cancel := context.WithTimeout(ctx, BalancesTimeout) - defer cancel() if int(req.PageSize) > cmd.Get().MaxRPCPageSize { return nil, status.Errorf(codes.InvalidArgument, "Requested page size %d can not be greater than max size %d", req.PageSize, cmd.Get().MaxRPCPageSize) From d94bf32dcfc6cd94c55765478c514e700952a27d Mon Sep 17 00:00:00 2001 From: Nishant Das Date: Wed, 1 Dec 2021 20:37:10 +0800 Subject: [PATCH 36/45] Faster Doppelganger Check (#9964) * faster check * potuz's review * potuz's review --- .../rpc/prysm/v1alpha1/validator/status.go | 68 ++++++++++++++----- .../prysm/v1alpha1/validator/status_test.go | 19 +----- 2 files changed, 54 insertions(+), 33 deletions(-) diff --git a/beacon-chain/rpc/prysm/v1alpha1/validator/status.go b/beacon-chain/rpc/prysm/v1alpha1/validator/status.go index ea527475cc..d8f8a6fa20 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/validator/status.go +++ b/beacon-chain/rpc/prysm/v1alpha1/validator/status.go @@ -24,6 +24,8 @@ import ( var errPubkeyDoesNotExist = errors.New("pubkey does not exist") var nonExistentIndex = 
types.ValidatorIndex(^uint64(0)) +const numStatesToCheck = 2 + // ValidatorStatus returns the validator status of the current epoch. // The status response can be one of the following: // DEPOSITED - validator's deposit has been recognized by Ethereum 1, not yet recognized by Ethereum. @@ -106,9 +108,17 @@ func (vs *Server) CheckDoppelGanger(ctx context.Context, req *ethpb.DoppelGanger if err != nil { return nil, status.Error(codes.Internal, "Could not get head state") } + + currEpoch := slots.ToEpoch(headState.Slot()) + isRecent, resp := checkValidatorsAreRecent(currEpoch, req) + // If all provided keys are recent we skip this check + // as we are unable to effectively determine if a doppelganger + // is active. + if isRecent { + return resp, nil + } // We walk back from the current head state to the state at the beginning of the previous 2 epochs. // Where S_i , i := 0,1,2. i = 0 would signify the current head state in this epoch. - currEpoch := slots.ToEpoch(headState.Slot()) previousEpoch, err := currEpoch.SafeSub(1) if err != nil { previousEpoch = currEpoch @@ -125,18 +135,18 @@ func (vs *Server) CheckDoppelGanger(ctx context.Context, req *ethpb.DoppelGanger if err != nil { return nil, status.Error(codes.Internal, "Could not get older state") } - resp := ðpb.DoppelGangerResponse{ + resp = ðpb.DoppelGangerResponse{ Responses: []*ethpb.DoppelGangerResponse_ValidatorResponse{}, } for _, v := range req.ValidatorRequests { // If the validator's last recorded epoch was - // less than or equal to 2 epochs ago, this method will not + // less than or equal to `numStatesToCheck` epochs ago, this method will not // be able to catch duplicates. This is due to how attestation // inclusion works, where an attestation for the current epoch // is able to included in the current or next epoch. Depending // on which epoch it is included the balance change will be // reflected in the following epoch. 
- if v.Epoch+2 >= currEpoch { + if v.Epoch+numStatesToCheck >= currEpoch { resp.Responses = append(resp.Responses, ðpb.DoppelGangerResponse_ValidatorResponse{ PublicKey: v.PublicKey, @@ -322,6 +332,44 @@ func (vs *Server) validatorStatus( } } +func (vs *Server) retrieveAfterEpochTransition(ctx context.Context, epoch types.Epoch) (state.BeaconState, error) { + endSlot, err := slots.EpochEnd(epoch) + if err != nil { + return nil, err + } + retState, err := vs.StateGen.StateBySlot(ctx, endSlot) + if err != nil { + return nil, err + } + return transition.ProcessSlots(ctx, retState, retState.Slot()+1) +} + +func checkValidatorsAreRecent(headEpoch types.Epoch, req *ethpb.DoppelGangerRequest) (bool, *ethpb.DoppelGangerResponse) { + validatorsAreRecent := true + resp := ðpb.DoppelGangerResponse{ + Responses: []*ethpb.DoppelGangerResponse_ValidatorResponse{}, + } + for _, v := range req.ValidatorRequests { + // Due to how balances are reflected for individual + // validators, we can only effectively determine if a + // validator voted or not if we are able to look + // back more than `numStatesToCheck` epochs into the past. + if v.Epoch+numStatesToCheck < headEpoch { + validatorsAreRecent = false + // Zero out response if we encounter non-recent validators to + // guard against potential misuse. 
+ resp.Responses = []*ethpb.DoppelGangerResponse_ValidatorResponse{} + break + } + resp.Responses = append(resp.Responses, + ðpb.DoppelGangerResponse_ValidatorResponse{ + PublicKey: v.PublicKey, + DuplicateExists: false, + }) + } + return validatorsAreRecent, resp +} + func statusForPubKey(headState state.ReadOnlyBeaconState, pubKey []byte) (ethpb.ValidatorStatus, types.ValidatorIndex, error) { if headState == nil || headState.IsNil() { return ethpb.ValidatorStatus_UNKNOWN_STATUS, 0, errors.New("head state does not exist") @@ -371,15 +419,3 @@ func depositStatus(depositOrBalance uint64) ethpb.ValidatorStatus { } return ethpb.ValidatorStatus_DEPOSITED } - -func (vs *Server) retrieveAfterEpochTransition(ctx context.Context, epoch types.Epoch) (state.BeaconState, error) { - endSlot, err := slots.EpochEnd(epoch) - if err != nil { - return nil, err - } - retState, err := vs.StateGen.StateBySlot(ctx, endSlot) - if err != nil { - return nil, err - } - return transition.ProcessSlots(ctx, retState, retState.Slot()+1) -} diff --git a/beacon-chain/rpc/prysm/v1alpha1/validator/status_test.go b/beacon-chain/rpc/prysm/v1alpha1/validator/status_test.go index ec17ae3970..02bd02da31 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/validator/status_test.go +++ b/beacon-chain/rpc/prysm/v1alpha1/validator/status_test.go @@ -1170,25 +1170,10 @@ func TestServer_CheckDoppelGanger(t *testing.T) { svSetup: func(t *testing.T) (*Server, *ethpb.DoppelGangerRequest, *ethpb.DoppelGangerResponse) { mockGen := stategen.NewMockService() - hs, ps, os, keys := createStateSetup(t, 4, mockGen) - // Previous Epoch State - for i := 10; i < 15; i++ { - bal, err := ps.BalanceAtIndex(types.ValidatorIndex(i)) - assert.NoError(t, err) - // Add 100 gwei, to mock an active validator - assert.NoError(t, ps.UpdateBalancesAtIndex(types.ValidatorIndex(i), bal-1000000000)) - } - - // Older Epoch State - for i := 10; i < 15; i++ { - bal, err := os.BalanceAtIndex(types.ValidatorIndex(i)) - assert.NoError(t, err) - // Add 
200 gwei, to mock an active validator - assert.NoError(t, os.UpdateBalancesAtIndex(types.ValidatorIndex(i), bal-2000000000)) - } + hs, _, _, keys := createStateSetup(t, 4, mockGen) vs := &Server{ - StateGen: mockGen, + StateGen: nil, HeadFetcher: &mockChain.ChainService{ State: hs, }, From 23bdce23546836e4f365baa1311196ba41169abc Mon Sep 17 00:00:00 2001 From: terence tsao Date: Wed, 1 Dec 2021 06:45:39 -0800 Subject: [PATCH 37/45] Fix grpc client connected... logging (#9956) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: RadosÅ‚aw Kapka --- beacon-chain/rpc/service.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon-chain/rpc/service.go b/beacon-chain/rpc/service.go index d4e7ad0470..a36b4e4307 100644 --- a/beacon-chain/rpc/service.go +++ b/beacon-chain/rpc/service.go @@ -382,7 +382,7 @@ func (s *Service) logNewClientConnection(ctx context.Context) { if !s.connectedRPCClients[clientInfo.Addr] { log.WithFields(logrus.Fields{ "addr": clientInfo.Addr.String(), - }).Infof("NewService gRPC client connected to beacon node") + }).Infof("gRPC client connected to beacon node") s.connectedRPCClients[clientInfo.Addr] = true } } From 3e61763bd7b90fbc2d8484dc2a1c4364094dfb33 Mon Sep 17 00:00:00 2001 From: Potuz Date: Wed, 1 Dec 2021 14:14:08 -0300 Subject: [PATCH 38/45] fix operation precedence (#9965) --- beacon-chain/monitor/process_block.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/beacon-chain/monitor/process_block.go b/beacon-chain/monitor/process_block.go index 5803aabfb4..db1a35712b 100644 --- a/beacon-chain/monitor/process_block.go +++ b/beacon-chain/monitor/process_block.go @@ -58,7 +58,7 @@ func (s *Service) processBlock(ctx context.Context, b block.SignedBeaconBlock) { s.processProposedBlock(state, root, blk) s.processAttestations(ctx, state, blk) - if blk.Slot()%AggregateReportingPeriod*params.BeaconConfig().SlotsPerEpoch == 0 { + if 
blk.Slot()%(AggregateReportingPeriod*params.BeaconConfig().SlotsPerEpoch) == 0 { s.logAggregatedPerformance() } } From 6ffba5c7697bd5f5d2135d90d5e2e1cdab33897f Mon Sep 17 00:00:00 2001 From: Potuz Date: Wed, 1 Dec 2021 14:39:43 -0300 Subject: [PATCH 39/45] Add v1alpha1_to_v2.go (#9966) * Add v1alpha1_to_v2.go * add tests * gazelle Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com> --- proto/migration/BUILD.bazel | 2 + proto/migration/v1alpha1_to_v1.go | 158 ----------------------- proto/migration/v1alpha1_to_v1_test.go | 90 ------------- proto/migration/v1alpha1_to_v2.go | 167 +++++++++++++++++++++++++ proto/migration/v1alpha1_to_v2_test.go | 100 +++++++++++++++ 5 files changed, 269 insertions(+), 248 deletions(-) create mode 100644 proto/migration/v1alpha1_to_v2.go create mode 100644 proto/migration/v1alpha1_to_v2_test.go diff --git a/proto/migration/BUILD.bazel b/proto/migration/BUILD.bazel index 8dcebacaa6..4480c61535 100644 --- a/proto/migration/BUILD.bazel +++ b/proto/migration/BUILD.bazel @@ -5,6 +5,7 @@ go_library( srcs = [ "enums.go", "v1alpha1_to_v1.go", + "v1alpha1_to_v2.go", ], importpath = "github.com/prysmaticlabs/prysm/proto/migration", visibility = ["//visibility:public"], @@ -26,6 +27,7 @@ go_test( srcs = [ "enums_test.go", "v1alpha1_to_v1_test.go", + "v1alpha1_to_v2_test.go", ], embed = [":go_default_library"], deps = [ diff --git a/proto/migration/v1alpha1_to_v1.go b/proto/migration/v1alpha1_to_v1.go index a46d49b7dd..c50557a272 100644 --- a/proto/migration/v1alpha1_to_v1.go +++ b/proto/migration/v1alpha1_to_v1.go @@ -3,10 +3,8 @@ package migration import ( "github.com/pkg/errors" statev1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1" - statev2 "github.com/prysmaticlabs/prysm/beacon-chain/state/v2" "github.com/prysmaticlabs/prysm/encoding/bytesutil" ethpbv1 "github.com/prysmaticlabs/prysm/proto/eth/v1" - ethpbv2 "github.com/prysmaticlabs/prysm/proto/eth/v2" ethpbalpha 
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block" "google.golang.org/protobuf/proto" @@ -56,19 +54,6 @@ func V1ToV1Alpha1SignedBlock(v1Blk *ethpbv1.SignedBeaconBlock) (*ethpbalpha.Sign return v1alpha1Block, nil } -// AltairToV1Alpha1SignedBlock converts a v2 SignedBeaconBlockAltair proto to a v1alpha1 proto. -func AltairToV1Alpha1SignedBlock(altairBlk *ethpbv2.SignedBeaconBlockAltair) (*ethpbalpha.SignedBeaconBlockAltair, error) { - marshaledBlk, err := proto.Marshal(altairBlk) - if err != nil { - return nil, errors.Wrap(err, "could not marshal block") - } - v1alpha1Block := ðpbalpha.SignedBeaconBlockAltair{} - if err := proto.Unmarshal(marshaledBlk, v1alpha1Block); err != nil { - return nil, errors.Wrap(err, "could not unmarshal block") - } - return v1alpha1Block, nil -} - // V1Alpha1ToV1Block converts a v1alpha1 BeaconBlock proto to a v1 proto. func V1Alpha1ToV1Block(alphaBlk *ethpbalpha.BeaconBlock) (*ethpbv1.BeaconBlock, error) { marshaledBlk, err := proto.Marshal(alphaBlk) @@ -367,19 +352,6 @@ func SignedBeaconBlock(block block.SignedBeaconBlock) (*ethpbv1.SignedBeaconBloc return v1Block, nil } -// V1Alpha1BeaconBlockAltairToV2 converts a v1alpha1 Altair beacon block to a v2 Altair block. 
-func V1Alpha1BeaconBlockAltairToV2(v1alpha1Block *ethpbalpha.BeaconBlockAltair) (*ethpbv2.BeaconBlockAltair, error) { - marshaledBlk, err := proto.Marshal(v1alpha1Block) - if err != nil { - return nil, errors.Wrap(err, "could not marshal block") - } - v2Block := ðpbv2.BeaconBlockAltair{} - if err := proto.Unmarshal(marshaledBlk, v2Block); err != nil { - return nil, errors.Wrap(err, "could not unmarshal block") - } - return v2Block, nil -} - func BeaconStateToV1(state *statev1.BeaconState) (*ethpbv1.BeaconState, error) { sourceFork := state.Fork() sourceLatestBlockHeader := state.LatestBlockHeader() @@ -513,133 +485,3 @@ func BeaconStateToV1(state *statev1.BeaconState) (*ethpbv1.BeaconState, error) { return result, nil } - -func BeaconStateAltairToV2(altairState *statev2.BeaconState) (*ethpbv2.BeaconStateV2, error) { - sourceFork := altairState.Fork() - sourceLatestBlockHeader := altairState.LatestBlockHeader() - sourceEth1Data := altairState.Eth1Data() - sourceEth1DataVotes := altairState.Eth1DataVotes() - sourceValidators := altairState.Validators() - sourcePrevJustifiedCheckpoint := altairState.PreviousJustifiedCheckpoint() - sourceCurrJustifiedCheckpoint := altairState.CurrentJustifiedCheckpoint() - sourceFinalizedCheckpoint := altairState.FinalizedCheckpoint() - - resultEth1DataVotes := make([]*ethpbv1.Eth1Data, len(sourceEth1DataVotes)) - for i, vote := range sourceEth1DataVotes { - resultEth1DataVotes[i] = ðpbv1.Eth1Data{ - DepositRoot: bytesutil.SafeCopyBytes(vote.DepositRoot), - DepositCount: vote.DepositCount, - BlockHash: bytesutil.SafeCopyBytes(vote.BlockHash), - } - } - resultValidators := make([]*ethpbv1.Validator, len(sourceValidators)) - for i, validator := range sourceValidators { - resultValidators[i] = ðpbv1.Validator{ - Pubkey: bytesutil.SafeCopyBytes(validator.PublicKey), - WithdrawalCredentials: bytesutil.SafeCopyBytes(validator.WithdrawalCredentials), - EffectiveBalance: validator.EffectiveBalance, - Slashed: validator.Slashed, - 
ActivationEligibilityEpoch: validator.ActivationEligibilityEpoch, - ActivationEpoch: validator.ActivationEpoch, - ExitEpoch: validator.ExitEpoch, - WithdrawableEpoch: validator.WithdrawableEpoch, - } - } - - sourcePrevEpochParticipation, err := altairState.PreviousEpochParticipation() - if err != nil { - return nil, errors.Wrap(err, "could not get previous epoch participation") - } - sourceCurrEpochParticipation, err := altairState.CurrentEpochParticipation() - if err != nil { - return nil, errors.Wrap(err, "could not get current epoch participation") - } - sourceInactivityScores, err := altairState.InactivityScores() - if err != nil { - return nil, errors.Wrap(err, "could not get inactivity scores") - } - sourceCurrSyncCommittee, err := altairState.CurrentSyncCommittee() - if err != nil { - return nil, errors.Wrap(err, "could not get current sync committee") - } - sourceNextSyncCommittee, err := altairState.NextSyncCommittee() - if err != nil { - return nil, errors.Wrap(err, "could not get next sync committee") - } - - result := ðpbv2.BeaconStateV2{ - GenesisTime: altairState.GenesisTime(), - GenesisValidatorsRoot: bytesutil.SafeCopyBytes(altairState.GenesisValidatorRoot()), - Slot: altairState.Slot(), - Fork: ðpbv1.Fork{ - PreviousVersion: bytesutil.SafeCopyBytes(sourceFork.PreviousVersion), - CurrentVersion: bytesutil.SafeCopyBytes(sourceFork.CurrentVersion), - Epoch: sourceFork.Epoch, - }, - LatestBlockHeader: ðpbv1.BeaconBlockHeader{ - Slot: sourceLatestBlockHeader.Slot, - ProposerIndex: sourceLatestBlockHeader.ProposerIndex, - ParentRoot: bytesutil.SafeCopyBytes(sourceLatestBlockHeader.ParentRoot), - StateRoot: bytesutil.SafeCopyBytes(sourceLatestBlockHeader.StateRoot), - BodyRoot: bytesutil.SafeCopyBytes(sourceLatestBlockHeader.BodyRoot), - }, - BlockRoots: bytesutil.SafeCopy2dBytes(altairState.BlockRoots()), - StateRoots: bytesutil.SafeCopy2dBytes(altairState.StateRoots()), - HistoricalRoots: bytesutil.SafeCopy2dBytes(altairState.HistoricalRoots()), - 
Eth1Data: ðpbv1.Eth1Data{ - DepositRoot: bytesutil.SafeCopyBytes(sourceEth1Data.DepositRoot), - DepositCount: sourceEth1Data.DepositCount, - BlockHash: bytesutil.SafeCopyBytes(sourceEth1Data.BlockHash), - }, - Eth1DataVotes: resultEth1DataVotes, - Eth1DepositIndex: altairState.Eth1DepositIndex(), - Validators: resultValidators, - Balances: altairState.Balances(), - RandaoMixes: bytesutil.SafeCopy2dBytes(altairState.RandaoMixes()), - Slashings: altairState.Slashings(), - PreviousEpochParticipation: bytesutil.SafeCopyBytes(sourcePrevEpochParticipation), - CurrentEpochParticipation: bytesutil.SafeCopyBytes(sourceCurrEpochParticipation), - JustificationBits: bytesutil.SafeCopyBytes(altairState.JustificationBits()), - PreviousJustifiedCheckpoint: ðpbv1.Checkpoint{ - Epoch: sourcePrevJustifiedCheckpoint.Epoch, - Root: bytesutil.SafeCopyBytes(sourcePrevJustifiedCheckpoint.Root), - }, - CurrentJustifiedCheckpoint: ðpbv1.Checkpoint{ - Epoch: sourceCurrJustifiedCheckpoint.Epoch, - Root: bytesutil.SafeCopyBytes(sourceCurrJustifiedCheckpoint.Root), - }, - FinalizedCheckpoint: ðpbv1.Checkpoint{ - Epoch: sourceFinalizedCheckpoint.Epoch, - Root: bytesutil.SafeCopyBytes(sourceFinalizedCheckpoint.Root), - }, - InactivityScores: sourceInactivityScores, - CurrentSyncCommittee: ðpbv2.SyncCommittee{ - Pubkeys: bytesutil.SafeCopy2dBytes(sourceCurrSyncCommittee.Pubkeys), - AggregatePubkey: bytesutil.SafeCopyBytes(sourceCurrSyncCommittee.AggregatePubkey), - }, - NextSyncCommittee: ðpbv2.SyncCommittee{ - Pubkeys: bytesutil.SafeCopy2dBytes(sourceNextSyncCommittee.Pubkeys), - AggregatePubkey: bytesutil.SafeCopyBytes(sourceNextSyncCommittee.AggregatePubkey), - }, - } - - return result, nil -} - -func V1Alpha1SignedContributionAndProofToV2(alphaContribution *ethpbalpha.SignedContributionAndProof) *ethpbv2.SignedContributionAndProof { - result := ðpbv2.SignedContributionAndProof{ - Message: ðpbv2.ContributionAndProof{ - AggregatorIndex: alphaContribution.Message.AggregatorIndex, - Contribution: 
ðpbv2.SyncCommitteeContribution{ - Slot: alphaContribution.Message.Contribution.Slot, - BeaconBlockRoot: alphaContribution.Message.Contribution.BlockRoot, - SubcommitteeIndex: alphaContribution.Message.Contribution.SubcommitteeIndex, - AggregationBits: alphaContribution.Message.Contribution.AggregationBits, - Signature: alphaContribution.Message.Contribution.Signature, - }, - SelectionProof: alphaContribution.Message.SelectionProof, - }, - Signature: alphaContribution.Signature, - } - return result -} diff --git a/proto/migration/v1alpha1_to_v1_test.go b/proto/migration/v1alpha1_to_v1_test.go index 810c1181ee..0dfd641415 100644 --- a/proto/migration/v1alpha1_to_v1_test.go +++ b/proto/migration/v1alpha1_to_v1_test.go @@ -7,7 +7,6 @@ import ( "github.com/prysmaticlabs/go-bitfield" "github.com/prysmaticlabs/prysm/encoding/bytesutil" ethpbv1 "github.com/prysmaticlabs/prysm/proto/eth/v1" - ethpbv2 "github.com/prysmaticlabs/prysm/proto/eth/v2" ethpbalpha "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper" "github.com/prysmaticlabs/prysm/testing/assert" @@ -120,35 +119,6 @@ func Test_V1ToV1Alpha1SignedBlock(t *testing.T) { assert.DeepEqual(t, v1Root, alphaRoot) } -func Test_AltairToV1Alpha1SignedBlock(t *testing.T) { - v2Block := util.HydrateV2SignedBeaconBlock(ðpbv2.SignedBeaconBlockAltair{}) - v2Block.Message.Slot = slot - v2Block.Message.ProposerIndex = validatorIndex - v2Block.Message.ParentRoot = parentRoot - v2Block.Message.StateRoot = stateRoot - v2Block.Message.Body.RandaoReveal = randaoReveal - v2Block.Message.Body.Eth1Data = ðpbv1.Eth1Data{ - DepositRoot: depositRoot, - DepositCount: depositCount, - BlockHash: blockHash, - } - syncCommitteeBits := bitfield.NewBitvector512() - syncCommitteeBits.SetBitAt(100, true) - v2Block.Message.Body.SyncAggregate = ðpbv1.SyncAggregate{ - SyncCommitteeBits: syncCommitteeBits, - SyncCommitteeSignature: signature, - } - v2Block.Signature = signature - - alphaBlock, 
err := AltairToV1Alpha1SignedBlock(v2Block) - require.NoError(t, err) - alphaRoot, err := alphaBlock.HashTreeRoot() - require.NoError(t, err) - v2Root, err := v2Block.HashTreeRoot() - require.NoError(t, err) - assert.DeepEqual(t, v2Root, alphaRoot) -} - func Test_V1ToV1Alpha1Block(t *testing.T) { alphaBlock := util.HydrateBeaconBlock(ðpbalpha.BeaconBlock{}) alphaBlock.Slot = slot @@ -465,35 +435,6 @@ func Test_V1AttestationToV1Alpha1(t *testing.T) { require.NoError(t, err) assert.DeepEqual(t, v1Root, v1Alpha1Root) } - -func Test_V1Alpha1BeaconBlockAltairToV2(t *testing.T) { - alphaBlock := util.HydrateBeaconBlockAltair(ðpbalpha.BeaconBlockAltair{}) - alphaBlock.Slot = slot - alphaBlock.ProposerIndex = validatorIndex - alphaBlock.ParentRoot = parentRoot - alphaBlock.StateRoot = stateRoot - alphaBlock.Body.RandaoReveal = randaoReveal - alphaBlock.Body.Eth1Data = ðpbalpha.Eth1Data{ - DepositRoot: depositRoot, - DepositCount: depositCount, - BlockHash: blockHash, - } - syncCommitteeBits := bitfield.NewBitvector512() - syncCommitteeBits.SetBitAt(100, true) - alphaBlock.Body.SyncAggregate = ðpbalpha.SyncAggregate{ - SyncCommitteeBits: syncCommitteeBits, - SyncCommitteeSignature: signature, - } - - v2Block, err := V1Alpha1BeaconBlockAltairToV2(alphaBlock) - require.NoError(t, err) - alphaRoot, err := alphaBlock.HashTreeRoot() - require.NoError(t, err) - v2Root, err := v2Block.HashTreeRoot() - require.NoError(t, err) - assert.DeepEqual(t, alphaRoot, v2Root) -} - func TestBeaconStateToV1(t *testing.T) { source, err := util.NewBeaconState(util.FillRootsNaturalOpt, func(state *ethpbalpha.BeaconState) error { state.GenesisTime = 1 @@ -689,34 +630,3 @@ func TestBeaconStateToV1(t *testing.T) { assert.Equal(t, types.Epoch(32), resultFinalizedCheckpoint.Epoch) assert.DeepEqual(t, bytesutil.PadTo([]byte("fcroot"), 32), resultFinalizedCheckpoint.Root) } - -func TestV1Alpha1SignedContributionAndProofToV2(t *testing.T) { - alphaContribution := ðpbalpha.SignedContributionAndProof{ - 
Message: ðpbalpha.ContributionAndProof{ - AggregatorIndex: validatorIndex, - Contribution: ðpbalpha.SyncCommitteeContribution{ - Slot: slot, - BlockRoot: blockHash, - SubcommitteeIndex: 1, - AggregationBits: bitfield.NewBitvector128(), - Signature: signature, - }, - SelectionProof: signature, - }, - Signature: signature, - } - v2Contribution := V1Alpha1SignedContributionAndProofToV2(alphaContribution) - require.NotNil(t, v2Contribution) - require.NotNil(t, v2Contribution.Message) - require.NotNil(t, v2Contribution.Message.Contribution) - assert.DeepEqual(t, signature, v2Contribution.Signature) - msg := v2Contribution.Message - assert.Equal(t, validatorIndex, msg.AggregatorIndex) - assert.DeepEqual(t, signature, msg.SelectionProof) - contrib := msg.Contribution - assert.Equal(t, slot, contrib.Slot) - assert.DeepEqual(t, blockHash, contrib.BeaconBlockRoot) - assert.Equal(t, uint64(1), contrib.SubcommitteeIndex) - assert.DeepEqual(t, bitfield.NewBitvector128(), contrib.AggregationBits) - assert.DeepEqual(t, signature, contrib.Signature) -} diff --git a/proto/migration/v1alpha1_to_v2.go b/proto/migration/v1alpha1_to_v2.go new file mode 100644 index 0000000000..37888e9367 --- /dev/null +++ b/proto/migration/v1alpha1_to_v2.go @@ -0,0 +1,167 @@ +package migration + +import ( + "github.com/pkg/errors" + statev2 "github.com/prysmaticlabs/prysm/beacon-chain/state/v2" + "github.com/prysmaticlabs/prysm/encoding/bytesutil" + ethpbv1 "github.com/prysmaticlabs/prysm/proto/eth/v1" + ethpbv2 "github.com/prysmaticlabs/prysm/proto/eth/v2" + ethpbalpha "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" + "google.golang.org/protobuf/proto" +) + +// V1Alpha1BeaconBlockAltairToV2 converts a v1alpha1 Altair beacon block to a v2 Altair block. 
+func V1Alpha1BeaconBlockAltairToV2(v1alpha1Block *ethpbalpha.BeaconBlockAltair) (*ethpbv2.BeaconBlockAltair, error) { + marshaledBlk, err := proto.Marshal(v1alpha1Block) + if err != nil { + return nil, errors.Wrap(err, "could not marshal block") + } + v2Block := ðpbv2.BeaconBlockAltair{} + if err := proto.Unmarshal(marshaledBlk, v2Block); err != nil { + return nil, errors.Wrap(err, "could not unmarshal block") + } + return v2Block, nil +} + +// AltairToV1Alpha1SignedBlock converts a v2 SignedBeaconBlockAltair proto to a v1alpha1 proto. +func AltairToV1Alpha1SignedBlock(altairBlk *ethpbv2.SignedBeaconBlockAltair) (*ethpbalpha.SignedBeaconBlockAltair, error) { + marshaledBlk, err := proto.Marshal(altairBlk) + if err != nil { + return nil, errors.Wrap(err, "could not marshal block") + } + v1alpha1Block := ðpbalpha.SignedBeaconBlockAltair{} + if err := proto.Unmarshal(marshaledBlk, v1alpha1Block); err != nil { + return nil, errors.Wrap(err, "could not unmarshal block") + } + return v1alpha1Block, nil +} + +func BeaconStateAltairToV2(altairState *statev2.BeaconState) (*ethpbv2.BeaconStateV2, error) { + sourceFork := altairState.Fork() + sourceLatestBlockHeader := altairState.LatestBlockHeader() + sourceEth1Data := altairState.Eth1Data() + sourceEth1DataVotes := altairState.Eth1DataVotes() + sourceValidators := altairState.Validators() + sourcePrevJustifiedCheckpoint := altairState.PreviousJustifiedCheckpoint() + sourceCurrJustifiedCheckpoint := altairState.CurrentJustifiedCheckpoint() + sourceFinalizedCheckpoint := altairState.FinalizedCheckpoint() + + resultEth1DataVotes := make([]*ethpbv1.Eth1Data, len(sourceEth1DataVotes)) + for i, vote := range sourceEth1DataVotes { + resultEth1DataVotes[i] = ðpbv1.Eth1Data{ + DepositRoot: bytesutil.SafeCopyBytes(vote.DepositRoot), + DepositCount: vote.DepositCount, + BlockHash: bytesutil.SafeCopyBytes(vote.BlockHash), + } + } + resultValidators := make([]*ethpbv1.Validator, len(sourceValidators)) + for i, validator := range 
sourceValidators { + resultValidators[i] = ðpbv1.Validator{ + Pubkey: bytesutil.SafeCopyBytes(validator.PublicKey), + WithdrawalCredentials: bytesutil.SafeCopyBytes(validator.WithdrawalCredentials), + EffectiveBalance: validator.EffectiveBalance, + Slashed: validator.Slashed, + ActivationEligibilityEpoch: validator.ActivationEligibilityEpoch, + ActivationEpoch: validator.ActivationEpoch, + ExitEpoch: validator.ExitEpoch, + WithdrawableEpoch: validator.WithdrawableEpoch, + } + } + + sourcePrevEpochParticipation, err := altairState.PreviousEpochParticipation() + if err != nil { + return nil, errors.Wrap(err, "could not get previous epoch participation") + } + sourceCurrEpochParticipation, err := altairState.CurrentEpochParticipation() + if err != nil { + return nil, errors.Wrap(err, "could not get current epoch participation") + } + sourceInactivityScores, err := altairState.InactivityScores() + if err != nil { + return nil, errors.Wrap(err, "could not get inactivity scores") + } + sourceCurrSyncCommittee, err := altairState.CurrentSyncCommittee() + if err != nil { + return nil, errors.Wrap(err, "could not get current sync committee") + } + sourceNextSyncCommittee, err := altairState.NextSyncCommittee() + if err != nil { + return nil, errors.Wrap(err, "could not get next sync committee") + } + + result := ðpbv2.BeaconStateV2{ + GenesisTime: altairState.GenesisTime(), + GenesisValidatorsRoot: bytesutil.SafeCopyBytes(altairState.GenesisValidatorRoot()), + Slot: altairState.Slot(), + Fork: ðpbv1.Fork{ + PreviousVersion: bytesutil.SafeCopyBytes(sourceFork.PreviousVersion), + CurrentVersion: bytesutil.SafeCopyBytes(sourceFork.CurrentVersion), + Epoch: sourceFork.Epoch, + }, + LatestBlockHeader: ðpbv1.BeaconBlockHeader{ + Slot: sourceLatestBlockHeader.Slot, + ProposerIndex: sourceLatestBlockHeader.ProposerIndex, + ParentRoot: bytesutil.SafeCopyBytes(sourceLatestBlockHeader.ParentRoot), + StateRoot: bytesutil.SafeCopyBytes(sourceLatestBlockHeader.StateRoot), + BodyRoot: 
bytesutil.SafeCopyBytes(sourceLatestBlockHeader.BodyRoot), + }, + BlockRoots: bytesutil.SafeCopy2dBytes(altairState.BlockRoots()), + StateRoots: bytesutil.SafeCopy2dBytes(altairState.StateRoots()), + HistoricalRoots: bytesutil.SafeCopy2dBytes(altairState.HistoricalRoots()), + Eth1Data: ðpbv1.Eth1Data{ + DepositRoot: bytesutil.SafeCopyBytes(sourceEth1Data.DepositRoot), + DepositCount: sourceEth1Data.DepositCount, + BlockHash: bytesutil.SafeCopyBytes(sourceEth1Data.BlockHash), + }, + Eth1DataVotes: resultEth1DataVotes, + Eth1DepositIndex: altairState.Eth1DepositIndex(), + Validators: resultValidators, + Balances: altairState.Balances(), + RandaoMixes: bytesutil.SafeCopy2dBytes(altairState.RandaoMixes()), + Slashings: altairState.Slashings(), + PreviousEpochParticipation: bytesutil.SafeCopyBytes(sourcePrevEpochParticipation), + CurrentEpochParticipation: bytesutil.SafeCopyBytes(sourceCurrEpochParticipation), + JustificationBits: bytesutil.SafeCopyBytes(altairState.JustificationBits()), + PreviousJustifiedCheckpoint: ðpbv1.Checkpoint{ + Epoch: sourcePrevJustifiedCheckpoint.Epoch, + Root: bytesutil.SafeCopyBytes(sourcePrevJustifiedCheckpoint.Root), + }, + CurrentJustifiedCheckpoint: ðpbv1.Checkpoint{ + Epoch: sourceCurrJustifiedCheckpoint.Epoch, + Root: bytesutil.SafeCopyBytes(sourceCurrJustifiedCheckpoint.Root), + }, + FinalizedCheckpoint: ðpbv1.Checkpoint{ + Epoch: sourceFinalizedCheckpoint.Epoch, + Root: bytesutil.SafeCopyBytes(sourceFinalizedCheckpoint.Root), + }, + InactivityScores: sourceInactivityScores, + CurrentSyncCommittee: ðpbv2.SyncCommittee{ + Pubkeys: bytesutil.SafeCopy2dBytes(sourceCurrSyncCommittee.Pubkeys), + AggregatePubkey: bytesutil.SafeCopyBytes(sourceCurrSyncCommittee.AggregatePubkey), + }, + NextSyncCommittee: ðpbv2.SyncCommittee{ + Pubkeys: bytesutil.SafeCopy2dBytes(sourceNextSyncCommittee.Pubkeys), + AggregatePubkey: bytesutil.SafeCopyBytes(sourceNextSyncCommittee.AggregatePubkey), + }, + } + + return result, nil +} + +func 
V1Alpha1SignedContributionAndProofToV2(alphaContribution *ethpbalpha.SignedContributionAndProof) *ethpbv2.SignedContributionAndProof { + result := ðpbv2.SignedContributionAndProof{ + Message: ðpbv2.ContributionAndProof{ + AggregatorIndex: alphaContribution.Message.AggregatorIndex, + Contribution: ðpbv2.SyncCommitteeContribution{ + Slot: alphaContribution.Message.Contribution.Slot, + BeaconBlockRoot: alphaContribution.Message.Contribution.BlockRoot, + SubcommitteeIndex: alphaContribution.Message.Contribution.SubcommitteeIndex, + AggregationBits: alphaContribution.Message.Contribution.AggregationBits, + Signature: alphaContribution.Message.Contribution.Signature, + }, + SelectionProof: alphaContribution.Message.SelectionProof, + }, + Signature: alphaContribution.Signature, + } + return result +} diff --git a/proto/migration/v1alpha1_to_v2_test.go b/proto/migration/v1alpha1_to_v2_test.go new file mode 100644 index 0000000000..a54d07ac0e --- /dev/null +++ b/proto/migration/v1alpha1_to_v2_test.go @@ -0,0 +1,100 @@ +package migration + +import ( + "testing" + + "github.com/prysmaticlabs/go-bitfield" + ethpbv1 "github.com/prysmaticlabs/prysm/proto/eth/v1" + ethpbv2 "github.com/prysmaticlabs/prysm/proto/eth/v2" + ethpbalpha "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1" + "github.com/prysmaticlabs/prysm/testing/assert" + "github.com/prysmaticlabs/prysm/testing/require" + "github.com/prysmaticlabs/prysm/testing/util" +) + +func TestV1Alpha1SignedContributionAndProofToV2(t *testing.T) { + alphaContribution := ðpbalpha.SignedContributionAndProof{ + Message: ðpbalpha.ContributionAndProof{ + AggregatorIndex: validatorIndex, + Contribution: ðpbalpha.SyncCommitteeContribution{ + Slot: slot, + BlockRoot: blockHash, + SubcommitteeIndex: 1, + AggregationBits: bitfield.NewBitvector128(), + Signature: signature, + }, + SelectionProof: signature, + }, + Signature: signature, + } + v2Contribution := V1Alpha1SignedContributionAndProofToV2(alphaContribution) + require.NotNil(t, 
v2Contribution) + require.NotNil(t, v2Contribution.Message) + require.NotNil(t, v2Contribution.Message.Contribution) + assert.DeepEqual(t, signature, v2Contribution.Signature) + msg := v2Contribution.Message + assert.Equal(t, validatorIndex, msg.AggregatorIndex) + assert.DeepEqual(t, signature, msg.SelectionProof) + contrib := msg.Contribution + assert.Equal(t, slot, contrib.Slot) + assert.DeepEqual(t, blockHash, contrib.BeaconBlockRoot) + assert.Equal(t, uint64(1), contrib.SubcommitteeIndex) + assert.DeepEqual(t, bitfield.NewBitvector128(), contrib.AggregationBits) + assert.DeepEqual(t, signature, contrib.Signature) +} +func Test_V1Alpha1BeaconBlockAltairToV2(t *testing.T) { + alphaBlock := util.HydrateBeaconBlockAltair(ðpbalpha.BeaconBlockAltair{}) + alphaBlock.Slot = slot + alphaBlock.ProposerIndex = validatorIndex + alphaBlock.ParentRoot = parentRoot + alphaBlock.StateRoot = stateRoot + alphaBlock.Body.RandaoReveal = randaoReveal + alphaBlock.Body.Eth1Data = ðpbalpha.Eth1Data{ + DepositRoot: depositRoot, + DepositCount: depositCount, + BlockHash: blockHash, + } + syncCommitteeBits := bitfield.NewBitvector512() + syncCommitteeBits.SetBitAt(100, true) + alphaBlock.Body.SyncAggregate = ðpbalpha.SyncAggregate{ + SyncCommitteeBits: syncCommitteeBits, + SyncCommitteeSignature: signature, + } + + v2Block, err := V1Alpha1BeaconBlockAltairToV2(alphaBlock) + require.NoError(t, err) + alphaRoot, err := alphaBlock.HashTreeRoot() + require.NoError(t, err) + v2Root, err := v2Block.HashTreeRoot() + require.NoError(t, err) + assert.DeepEqual(t, alphaRoot, v2Root) +} + +func Test_AltairToV1Alpha1SignedBlock(t *testing.T) { + v2Block := util.HydrateV2SignedBeaconBlock(ðpbv2.SignedBeaconBlockAltair{}) + v2Block.Message.Slot = slot + v2Block.Message.ProposerIndex = validatorIndex + v2Block.Message.ParentRoot = parentRoot + v2Block.Message.StateRoot = stateRoot + v2Block.Message.Body.RandaoReveal = randaoReveal + v2Block.Message.Body.Eth1Data = ðpbv1.Eth1Data{ + DepositRoot: 
depositRoot, + DepositCount: depositCount, + BlockHash: blockHash, + } + syncCommitteeBits := bitfield.NewBitvector512() + syncCommitteeBits.SetBitAt(100, true) + v2Block.Message.Body.SyncAggregate = ðpbv1.SyncAggregate{ + SyncCommitteeBits: syncCommitteeBits, + SyncCommitteeSignature: signature, + } + v2Block.Signature = signature + + alphaBlock, err := AltairToV1Alpha1SignedBlock(v2Block) + require.NoError(t, err) + alphaRoot, err := alphaBlock.HashTreeRoot() + require.NoError(t, err) + v2Root, err := v2Block.HashTreeRoot() + require.NoError(t, err) + assert.DeepEqual(t, v2Root, alphaRoot) +} From 5e2229ce9d7772681e9befc21bee3edcb06fbeab Mon Sep 17 00:00:00 2001 From: Nishant Das Date: Thu, 2 Dec 2021 02:09:34 +0800 Subject: [PATCH 40/45] Update Libp2p to v0.15.1 (#9960) * fix deps * tidy it all * fix build * remove tls patch Co-authored-by: Preston Van Loon Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com> --- beacon-chain/p2p/BUILD.bazel | 1 - beacon-chain/p2p/discovery.go | 7 +- deps.bzl | 186 ++++++++++-------- go.mod | 68 +++---- go.sum | 148 +++++++------- third_party/libp2p_tls.patch | 16 -- .../multiple_endpoints_grpc_resolver.go | 4 +- 7 files changed, 220 insertions(+), 210 deletions(-) delete mode 100644 third_party/libp2p_tls.patch diff --git a/beacon-chain/p2p/BUILD.bazel b/beacon-chain/p2p/BUILD.bazel index fb2140b141..7fb61a965f 100644 --- a/beacon-chain/p2p/BUILD.bazel +++ b/beacon-chain/p2p/BUILD.bazel @@ -74,7 +74,6 @@ go_library( "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library", "@com_github_ethereum_go_ethereum//p2p/enr:go_default_library", "@com_github_ferranbt_fastssz//:go_default_library", - "@com_github_ipfs_go_ipfs_addr//:go_default_library", "@com_github_kevinms_leakybucket_go//:go_default_library", "@com_github_kr_pretty//:go_default_library", "@com_github_libp2p_go_libp2p//:go_default_library", diff --git a/beacon-chain/p2p/discovery.go b/beacon-chain/p2p/discovery.go 
index 997f00429a..4dff9ec93a 100644 --- a/beacon-chain/p2p/discovery.go +++ b/beacon-chain/p2p/discovery.go @@ -9,7 +9,6 @@ import ( "github.com/ethereum/go-ethereum/p2p/discover" "github.com/ethereum/go-ethereum/p2p/enode" "github.com/ethereum/go-ethereum/p2p/enr" - iaddr "github.com/ipfs/go-ipfs-addr" "github.com/libp2p/go-libp2p-core/network" "github.com/libp2p/go-libp2p-core/peer" ma "github.com/multiformats/go-multiaddr" @@ -454,11 +453,7 @@ func peersFromStringAddrs(addrs []string) ([]ma.Multiaddr, error) { } func multiAddrFromString(address string) (ma.Multiaddr, error) { - addr, err := iaddr.ParseString(address) - if err != nil { - return nil, err - } - return addr.Multiaddr(), nil + return ma.NewMultiaddr(address) } func udpVersionFromIP(ipAddr net.IP) string { diff --git a/deps.bzl b/deps.bzl index 77f34a6684..d72a58e3b6 100644 --- a/deps.bzl +++ b/deps.bzl @@ -81,8 +81,8 @@ def prysm_deps(): go_repository( name = "com_github_andreasbriese_bbloom", importpath = "github.com/AndreasBriese/bbloom", - sum = "h1:HD8gA2tkByhMAwYaFAX9w2l7vxvBQ5NMoxDrkhqhtn4=", - version = "v0.0.0-20190306092124-e2d15f34fcf9", + sum = "h1:cTp8I5+VIoKjsnZuH8vjyaysT/ses3EvZeaV/1UkF2M=", + version = "v0.0.0-20190825152654-46b345b51c96", ) go_repository( @@ -526,6 +526,13 @@ def prysm_deps(): sum = "h1:cqQfy1jclcSy/FwLjemeg3SR1yaINm74aQyupQ0Bl8M=", version = "v0.0.0-20201120205902-5459f2c99403", ) + go_repository( + name = "com_github_cncf_xds_go", + importpath = "github.com/cncf/xds/go", + sum = "h1:OZmjad4L3H8ncOIR8rnb5MREYqG8ixi5+WbeUsquF0c=", + version = "v0.0.0-20210312221358-fbca930ec8ed", + ) + go_repository( name = "com_github_cockroachdb_datadriven", importpath = "github.com/cockroachdb/datadriven", @@ -672,8 +679,8 @@ def prysm_deps(): go_repository( name = "com_github_dgraph_io_badger", importpath = "github.com/dgraph-io/badger", - sum = "h1:w9pSFNSdq/JPM1N12Fz/F/bzo993Is1W+Q7HjPzi7yg=", - version = "v1.6.1", + sum = "h1:mNw0qs90GVgGGWylh0umH5iag1j6n/PeJtNvL6KY/x8=", + 
version = "v1.6.2", ) go_repository( @@ -801,8 +808,8 @@ def prysm_deps(): go_repository( name = "com_github_envoyproxy_go_control_plane", importpath = "github.com/envoyproxy/go-control-plane", - sum = "h1:QyzYnTnPE15SQyUeqU6qLbWxMkwyAyu+vGksa0b7j00=", - version = "v0.9.9-0.20210217033140-668b12f5399d", + sum = "h1:dulLQAYQFYtG5MTplgNGHWuV2D+OBD+Z8lmDBmbLg+s=", + version = "v0.9.9-0.20210512163311-63b5d3c536b0", ) go_repository( name = "com_github_envoyproxy_protoc_gen_validate", @@ -818,8 +825,8 @@ def prysm_deps(): importpath = "github.com/ethereum/go-ethereum", patch_args = ["-p1"], patches = ["//third_party:com_github_ethereum_go_ethereum_secp256k1.patch"], - sum = "h1:Ft2GcLQrr2M89l49g9NoqgNtJZ9AahzMb7N6VXKZy5U=", - version = "v1.10.10", + sum = "h1:Mi7op8Vnhq9L2jpczrDzPm6c9XZbvHu0h4hoDq9u7QM=", + version = "v1.10.11-0.20211018203420-b97f57882c14", ) go_repository( @@ -1060,6 +1067,13 @@ def prysm_deps(): sum = "h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk=", version = "v1.8.0", ) + go_repository( + name = "com_github_go_task_slim_sprig", + importpath = "github.com/go-task/slim-sprig", + sum = "h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I=", + version = "v0.0.0-20210107165309-348f09dbbbc0", + ) + go_repository( name = "com_github_go_yaml_yaml", importpath = "github.com/go-yaml/yaml", @@ -1234,8 +1248,8 @@ def prysm_deps(): go_repository( name = "com_github_google_uuid", importpath = "github.com/google/uuid", - sum = "h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs=", - version = "v1.2.0", + sum = "h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=", + version = "v1.3.0", ) go_repository( name = "com_github_googleapis_gax_go", @@ -1326,8 +1340,8 @@ def prysm_deps(): go_repository( name = "com_github_grpc_ecosystem_grpc_gateway", importpath = "github.com/grpc-ecosystem/grpc-gateway", - sum = "h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI=", - version = "v1.9.5", + sum = "h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo=", + version = "v1.16.0", ) go_repository( 
name = "com_github_grpc_ecosystem_grpc_gateway_v2", @@ -1516,13 +1530,6 @@ def prysm_deps(): sum = "h1:vlNjIqmUZ9CMAWsbURYl3a6wZbw7q5RHVvlXTNS/Bs8=", version = "v0.0.0-20170803182201-1ca381bf3150", ) - go_repository( - name = "com_github_iancoleman_strcase", - importpath = "github.com/iancoleman/strcase", - nofuzz = True, - sum = "h1:dJBk1m2/qjL1twPLf68JND55vvivMupZ4wIzE8CTdBw=", - version = "v0.1.3", - ) go_repository( name = "com_github_ianlancetaylor_cgosymbolizer", @@ -1627,8 +1634,8 @@ def prysm_deps(): go_repository( name = "com_github_ipfs_go_datastore", importpath = "github.com/ipfs/go-datastore", - sum = "h1:cwOUcGMLdLPWgu3SlrCckCMznaGADbPqE0r8h768/Dg=", - version = "v0.4.5", + sum = "h1:zU2cmweykxJ+ziXnA2cPtsLe8rdR/vrthOipLPuf6kc=", + version = "v0.4.6", ) go_repository( name = "com_github_ipfs_go_detect_race", @@ -1639,8 +1646,8 @@ def prysm_deps(): go_repository( name = "com_github_ipfs_go_ds_badger", importpath = "github.com/ipfs/go-ds-badger", - sum = "h1:J27YvAcpuA5IvZUbeBxOcQgqnYHUPxoygc6QxxkodZ4=", - version = "v0.2.3", + sum = "h1:ju5REfIm+v+wgVnQ19xGLYPHYHbYLR6qJfmMbCDSK1I=", + version = "v0.2.7", ) go_repository( name = "com_github_ipfs_go_ds_leveldb", @@ -1902,8 +1909,8 @@ def prysm_deps(): go_repository( name = "com_github_klauspost_cpuid_v2", importpath = "github.com/klauspost/cpuid/v2", - sum = "h1:bhR2mgIlno/Sfk4oUbH4sPlc83z1yGrN9bvqiq3C33I=", - version = "v2.0.8", + sum = "h1:lgaqFMSdTdQYdZ04uHyN2d/eKdOMyi2YLSvlQIBFYa4=", + version = "v2.0.9", ) go_repository( name = "com_github_klauspost_crc32", @@ -2006,8 +2013,8 @@ def prysm_deps(): go_repository( name = "com_github_libp2p_go_addr_util", importpath = "github.com/libp2p/go-addr-util", - sum = "h1:7cWK5cdA5x72jX0g8iLrQWm5TRJZ6CzGdPEhWj7plWU=", - version = "v0.0.2", + sum = "h1:acKsntI33w2bTU7tC9a0SaPimJGfSI0bFKC18ChxeVI=", + version = "v0.1.0", ) go_repository( name = "com_github_libp2p_go_buffer_pool", @@ -2038,8 +2045,8 @@ def prysm_deps(): name = "com_github_libp2p_go_libp2p", 
build_file_proto_mode = "disable_global", importpath = "github.com/libp2p/go-libp2p", - sum = "h1:QCJE+jGyqxWdrSPuS4jByXCzosgaIg4SJTLCRplJ53w=", - version = "v0.14.4", + sum = "h1:wSC//fziln3aMTwgF2vOl0v+hTSFfsdr686Fl0uD3ug=", + version = "v0.15.1", ) go_repository( @@ -2072,8 +2079,8 @@ def prysm_deps(): name = "com_github_libp2p_go_libp2p_core", build_file_proto_mode = "disable_global", importpath = "github.com/libp2p/go-libp2p-core", - sum = "h1:3S8g006qG6Tjpj1JdRK2S+TWc2DJQKX/RG9fdLeiLSU=", - version = "v0.8.6", + sum = "h1:t97Mv0LIBZlP2FXVRNKKVzHJCIjbIWGxYptGId4+htU=", + version = "v0.9.0", ) go_repository( name = "com_github_libp2p_go_libp2p_crypto", @@ -2081,6 +2088,7 @@ def prysm_deps(): sum = "h1:k9MFy+o2zGDNGsaoZl0MA3iZ75qXxr9OOoAZF+sD5OQ=", version = "v0.1.0", ) + go_repository( name = "com_github_libp2p_go_libp2p_discovery", importpath = "github.com/libp2p/go-libp2p-discovery", @@ -2118,8 +2126,8 @@ def prysm_deps(): name = "com_github_libp2p_go_libp2p_noise", build_file_proto_mode = "disable_global", importpath = "github.com/libp2p/go-libp2p-noise", - sum = "h1:wmk5nhB9a2w2RxMOyvsoKjizgJOEaJdfAakr0jN8gds=", - version = "v0.2.0", + sum = "h1:MRt5XGfYziDXIUy2udtMWfPmzZqUDYoC1FZoKnqPzwk=", + version = "v0.2.2", ) go_repository( name = "com_github_libp2p_go_libp2p_peer", @@ -2130,8 +2138,8 @@ def prysm_deps(): go_repository( name = "com_github_libp2p_go_libp2p_peerstore", importpath = "github.com/libp2p/go-libp2p-peerstore", - sum = "h1:83JoLxyR9OYTnNfB5vvFqvMUv/xDNa6NoPHnENhBsGw=", - version = "v0.2.7", + sum = "h1:nJghUlUkFVvyk7ccsM67oFA6kqUkwyCM1G4WPVMCWYA=", + version = "v0.2.8", ) go_repository( name = "com_github_libp2p_go_libp2p_pnet", @@ -2144,14 +2152,14 @@ def prysm_deps(): name = "com_github_libp2p_go_libp2p_pubsub", build_file_proto_mode = "disable_global", importpath = "github.com/libp2p/go-libp2p-pubsub", - sum = "h1:OzcIuCWyJpOrWH0PTOfvxTzqFur4tiXpY5jXC8OxjyE=", - version = "v0.5.0", + sum = "h1:YkO3gG9J1mQBEMRrM5obiG3JD0L8RcrzIpoeLeiYqH8=", 
+ version = "v0.5.6", ) go_repository( name = "com_github_libp2p_go_libp2p_quic_transport", importpath = "github.com/libp2p/go-libp2p-quic-transport", - sum = "h1:koDCbWD9CCHwcHZL3/WEvP2A+e/o5/W5L3QS/2SPMA0=", - version = "v0.10.0", + sum = "h1:p1YQDZRHH4Cv2LPtHubqlQ9ggz4CKng/REZuXZbZMhM=", + version = "v0.11.2", ) go_repository( @@ -2165,8 +2173,8 @@ def prysm_deps(): name = "com_github_libp2p_go_libp2p_swarm", build_file_proto_mode = "disable_global", importpath = "github.com/libp2p/go-libp2p-swarm", - sum = "h1:HIK0z3Eqoo8ugmN8YqWAhD2RORgR+3iNXYG4U2PFd1E=", - version = "v0.5.0", + sum = "h1:hsYaD/y6+kZff1o1Mc56NcuwSg80lIphTS/zDk3mO4M=", + version = "v0.5.3", ) go_repository( name = "com_github_libp2p_go_libp2p_testing", @@ -2177,12 +2185,8 @@ def prysm_deps(): go_repository( name = "com_github_libp2p_go_libp2p_tls", importpath = "github.com/libp2p/go-libp2p-tls", - patch_args = ["-p1"], - patches = [ - "@prysm//third_party:libp2p_tls.patch", # See: https://github.com/libp2p/go-libp2p-tls/issues/66 - ], - sum = "h1:Ge/2CYttU7XdkPPqQ7e3TiuMFneLie1rM/UjRxPPGsI=", - version = "v0.1.4-0.20200421131144-8a8ad624a291", + sum = "h1:N8i5wPiHudA+02sfW85R2nUbybPm7agjAywZc6pd3xA=", + version = "v0.2.0", ) go_repository( @@ -2275,8 +2279,8 @@ def prysm_deps(): go_repository( name = "com_github_libp2p_go_ws_transport", importpath = "github.com/libp2p/go-ws-transport", - sum = "h1:9tvtQ9xbws6cA5LvqdE6Ne3vcmGB4f1z9SByggk4s0k=", - version = "v0.4.0", + sum = "h1:cO6x4P0v6PfxbKnxmf5cY2Ny4OPDGYkUqNvZzp/zdlo=", + version = "v0.5.0", ) go_repository( name = "com_github_libp2p_go_yamux", @@ -2290,6 +2294,12 @@ def prysm_deps(): sum = "h1:RwtpYZ2/wVviZ5+3pjC8qdQ4TKnrak0/E01N1UWoAFU=", version = "v2.2.0", ) + go_repository( + name = "com_github_libp2p_zeroconf_v2", + importpath = "github.com/libp2p/zeroconf/v2", + sum = "h1:9aZt2jwaBjkAJ/1cZnRTvzfN0eCDYaJWTjHST5tZIlk=", + version = "v2.1.0", + ) go_repository( name = "com_github_lightstep_lightstep_tracer_common_golang_gogo", @@ -2313,8 
+2323,8 @@ def prysm_deps(): go_repository( name = "com_github_lucas_clemente_quic_go", importpath = "github.com/lucas-clemente/quic-go", - sum = "h1:eCDQqvGBB+kCTkA0XrAFtNe81FMa0/fn4QSoeAbmiF4=", - version = "v0.19.3", + sum = "h1:8LqqL7nBQFDUINadW0fHV/xSaCQJgmJC0Gv+qUnjd78=", + version = "v0.21.2", ) go_repository( @@ -2355,18 +2365,26 @@ def prysm_deps(): sum = "h1:jvTsT/HpCn2UZJdP+UUB53FfUUgeOyG5K1ns0OJOGVs=", version = "v0.2.1", ) - go_repository( - name = "com_github_marten_seemann_qtls", - importpath = "github.com/marten-seemann/qtls", - sum = "h1:ECsuYUKalRL240rRD4Ri33ISb7kAQ3qGDlrrl55b2pc=", - version = "v0.10.0", - ) + go_repository( name = "com_github_marten_seemann_qtls_go1_15", importpath = "github.com/marten-seemann/qtls-go1-15", - sum = "h1:LIH6K34bPVttyXnUWixk0bzH6/N07VxbSabxn5A5gZQ=", - version = "v0.1.1", + sum = "h1:Ci4EIUN6Rlb+D6GmLdej/bCQ4nPYNtVXQB+xjiXE1nk=", + version = "v0.1.5", ) + go_repository( + name = "com_github_marten_seemann_qtls_go1_16", + importpath = "github.com/marten-seemann/qtls-go1-16", + sum = "h1:xbHbOGGhrenVtII6Co8akhLEdrawwB2iHl5yhJRpnco=", + version = "v0.1.4", + ) + go_repository( + name = "com_github_marten_seemann_qtls_go1_17", + importpath = "github.com/marten-seemann/qtls-go1-17", + sum = "h1:/rpmWuGvceLwwWuaKPdjpR4JJEUH0tq64/I3hvzaNLM=", + version = "v0.1.0-rc.1", + ) + go_repository( name = "com_github_marten_seemann_tcp", importpath = "github.com/marten-seemann/tcp", @@ -2579,8 +2597,8 @@ def prysm_deps(): go_repository( name = "com_github_multiformats_go_multiaddr", importpath = "github.com/multiformats/go-multiaddr", - sum = "h1:vo2OTSAqnENB2rLk79pLtr+uhj+VAzSe3uef5q0lRSs=", - version = "v0.3.3", + sum = "h1:hL/K4ZJhJ5PTw3nwylq9lGU5yArzcAroZmex1ghSEkQ=", + version = "v0.4.0", ) go_repository( name = "com_github_multiformats_go_multiaddr_dns", @@ -2709,8 +2727,8 @@ def prysm_deps(): go_repository( name = "com_github_nxadm_tail", importpath = "github.com/nxadm/tail", - sum = 
"h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=", - version = "v1.4.4", + sum = "h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=", + version = "v1.4.8", ) go_repository( name = "com_github_nytimes_gziphandler", @@ -2752,14 +2770,14 @@ def prysm_deps(): go_repository( name = "com_github_onsi_ginkgo", importpath = "github.com/onsi/ginkgo", - sum = "h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=", - version = "v1.14.0", + sum = "h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc=", + version = "v1.16.4", ) go_repository( name = "com_github_onsi_gomega", importpath = "github.com/onsi/gomega", - sum = "h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=", - version = "v1.10.1", + sum = "h1:7lLHu94wT9Ij0o6EWWclhu0aOh32VxhkwEJvzuWPeak=", + version = "v1.13.0", ) go_repository( name = "com_github_op_go_logging", @@ -2931,14 +2949,14 @@ def prysm_deps(): go_repository( name = "com_github_prometheus_common", importpath = "github.com/prometheus/common", - sum = "h1:3jqPBvKT4OHAbje2Ql7KeaaSicDBCxMYwEJU1zRJceE=", - version = "v0.29.0", + sum = "h1:JEkYlQnpzrzQFxi6gnukFPdQ+ac82oRhzMcIduJu/Ug=", + version = "v0.30.0", ) go_repository( name = "com_github_prometheus_procfs", importpath = "github.com/prometheus/procfs", - sum = "h1:OQZ41sZU9XkRpzrz8/TD0EldH/Rwbddkdu5wDyUwzfE=", - version = "v0.7.0", + sum = "h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=", + version = "v0.7.3", ) go_repository( name = "com_github_prometheus_prom2json", @@ -2974,8 +2992,8 @@ def prysm_deps(): go_repository( name = "com_github_prysmaticlabs_protoc_gen_go_cast", importpath = "github.com/prysmaticlabs/protoc-gen-go-cast", - sum = "h1:k7CCMwN7VooQ7GhfySnaVyI4/9+QbhJTdasoC6VOZOI=", - version = "v0.0.0-20210504233148-1e141af6a0a1", + sum = "h1:+jhXLjEYVW4qU2z5SOxlxN+Hv/A9FDf0HpfDurfMEz0=", + version = "v0.0.0-20211014160335-757fae4f38c6", ) go_repository( @@ -4074,6 +4092,12 @@ def prysm_deps(): sum = "h1:yGBYzYMewVL0yO9qqJv3Z5+IRhPdU7e9o/2oKpX4YvI=", version = "v0.2.1", ) + go_repository( + name = 
"io_opentelemetry_go_proto_otlp", + importpath = "go.opentelemetry.io/proto/otlp", + sum = "h1:rwOQPCuKAKmwGKq2aVNnYIibI6wnV7EvzgfTCzcdGg8=", + version = "v0.7.0", + ) go_repository( name = "io_rsc_binaryregexp", @@ -4148,8 +4172,8 @@ def prysm_deps(): name = "org_golang_google_grpc", build_file_proto_mode = "disable", importpath = "google.golang.org/grpc", - sum = "h1:uSZWeQJX5j11bIQ4AJoj+McDBo29cY1MCoC1wO3ts+c=", - version = "v1.37.0", + sum = "h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q=", + version = "v1.40.0", ) go_repository( name = "org_golang_google_grpc_cmd_protoc_gen_go_grpc", @@ -4212,8 +4236,8 @@ def prysm_deps(): go_repository( name = "org_golang_x_net", importpath = "golang.org/x/net", - sum = "h1:20cMwl2fHAzkJMEA+8J4JgqBQcQGzbisXo31MIeenXI=", - version = "v0.0.0-20210805182204-aaa1db679c0d", + sum = "h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c=", + version = "v0.0.0-20210813160813-60bc85c4be6d", ) go_repository( name = "org_golang_x_oauth2", @@ -4250,8 +4274,8 @@ def prysm_deps(): go_repository( name = "org_golang_x_text", importpath = "golang.org/x/text", - sum = "h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=", - version = "v0.3.6", + sum = "h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=", + version = "v0.3.7", ) go_repository( name = "org_golang_x_time", @@ -4330,8 +4354,8 @@ def prysm_deps(): go_repository( name = "org_uber_go_zap", importpath = "go.uber.org/zap", - sum = "h1:CSUJ2mjFszzEWt4CdKISEuChVIXGBn3lAPwkRGyVrc4=", - version = "v1.18.1", + sum = "h1:mZQZefskPPCMIBCSEH0v2/iUqqLrYtaeqwD6FUGUnFE=", + version = "v1.19.0", ) go_repository( name = "tools_gotest", diff --git a/go.mod b/go.mod index 7c8fa5dfaa..e4aa47b358 100644 --- a/go.mod +++ b/go.mod @@ -4,27 +4,17 @@ go 1.16 require ( contrib.go.opencensus.io/exporter/jaeger v0.2.1 - github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46 // indirect - github.com/allegro/bigcache v1.2.1 // indirect github.com/aristanetworks/goarista v0.0.0-20200521140103-6c3304613b30 
github.com/bazelbuild/rules_go v0.23.2 - github.com/btcsuite/btcd v0.22.0-beta // indirect - github.com/cespare/cp v1.1.1 // indirect - github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect github.com/d4l3k/messagediff v1.2.1 - github.com/deckarep/golang-set v1.7.1 // indirect github.com/dgraph-io/ristretto v0.0.4-0.20210318174700-74754f61e018 github.com/dustin/go-humanize v1.0.0 github.com/emicklei/dot v0.11.0 - github.com/ethereum/go-ethereum v1.10.10 - github.com/fatih/color v1.9.0 // indirect + github.com/ethereum/go-ethereum v1.10.11-0.20211018203420-b97f57882c14 github.com/ferranbt/fastssz v0.0.0-20210905181407-59cf6761a7d5 github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 github.com/fsnotify/fsnotify v1.4.9 - github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 // indirect github.com/ghodss/yaml v1.0.0 - github.com/go-logr/logr v0.2.1 // indirect - github.com/go-ole/go-ole v1.2.5 // indirect github.com/go-yaml/yaml v2.1.0+incompatible github.com/gogo/protobuf v1.3.2 github.com/golang-jwt/jwt v3.2.2+incompatible @@ -33,7 +23,7 @@ require ( github.com/golang/protobuf v1.5.2 github.com/golang/snappy v0.0.4 github.com/google/gofuzz v1.2.0 - github.com/google/uuid v1.2.0 + github.com/google/uuid v1.3.0 github.com/gorilla/mux v1.8.0 github.com/grpc-ecosystem/go-grpc-middleware v1.2.2 github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 @@ -41,48 +31,37 @@ require ( github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d github.com/herumi/bls-eth-go-binary v0.0.0-20210917013441-d37c07cfda4e github.com/ianlancetaylor/cgosymbolizer v0.0.0-20200424224625-be1b05b0b279 - github.com/ipfs/go-ipfs-addr v0.0.1 - github.com/ipfs/go-log v1.0.5 // indirect github.com/ipfs/go-log/v2 v2.3.0 github.com/joonix/log v0.0.0-20200409080653-9c1d2ceb5f1d github.com/json-iterator/go v1.1.11 github.com/k0kubun/go-ansi v0.0.0-20180517002512-3bf9e2903213 github.com/kevinms/leakybucket-go v0.0.0-20200115003610-082473db97ca - 
github.com/klauspost/cpuid/v2 v2.0.8 // indirect - github.com/koron/go-ssdp v0.0.2 // indirect github.com/kr/pretty v0.2.1 - github.com/libp2p/go-libp2p v0.14.4 + github.com/libp2p/go-libp2p v0.15.1 github.com/libp2p/go-libp2p-blankhost v0.2.0 - github.com/libp2p/go-libp2p-core v0.8.6 - github.com/libp2p/go-libp2p-discovery v0.5.1 // indirect - github.com/libp2p/go-libp2p-noise v0.2.0 - github.com/libp2p/go-libp2p-peerstore v0.2.7 - github.com/libp2p/go-libp2p-pubsub v0.5.0 - github.com/libp2p/go-libp2p-swarm v0.5.0 - github.com/libp2p/go-libp2p-tls v0.1.4-0.20200421131144-8a8ad624a291 // indirect + github.com/libp2p/go-libp2p-core v0.9.0 + github.com/libp2p/go-libp2p-noise v0.2.2 + github.com/libp2p/go-libp2p-peerstore v0.2.8 + github.com/libp2p/go-libp2p-pubsub v0.5.6 + github.com/libp2p/go-libp2p-swarm v0.5.3 github.com/libp2p/go-tcp-transport v0.2.8 github.com/logrusorgru/aurora v2.0.3+incompatible github.com/manifoldco/promptui v0.7.0 - github.com/miekg/dns v1.1.43 // indirect github.com/minio/highwayhash v1.0.1 github.com/minio/sha256-simd v1.0.0 github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826 - github.com/multiformats/go-multiaddr v0.3.3 + github.com/multiformats/go-multiaddr v0.4.0 github.com/patrickmn/go-cache v2.1.0+incompatible github.com/paulbellamy/ratecounter v0.2.0 github.com/pborman/uuid v1.2.1 - github.com/peterh/liner v1.2.0 // indirect github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.11.0 github.com/prometheus/client_model v0.2.0 - github.com/prometheus/common v0.29.0 // indirect - github.com/prometheus/procfs v0.7.0 // indirect github.com/prometheus/prom2json v1.3.0 - github.com/prometheus/tsdb v0.10.0 // indirect github.com/prysmaticlabs/eth2-types v0.0.0-20210303084904-c9735a06829d github.com/prysmaticlabs/go-bitfield v0.0.0-20210809151128-385d8c5e3fb7 github.com/prysmaticlabs/prombbolt v0.0.0-20210126082820-9b7adba6db7c - github.com/prysmaticlabs/protoc-gen-go-cast v0.0.0-20210504233148-1e141af6a0a1 + 
github.com/prysmaticlabs/protoc-gen-go-cast v0.0.0-20211014160335-757fae4f38c6 github.com/r3labs/sse v0.0.0-20210224172625-26fe804710bc github.com/rs/cors v1.7.0 github.com/schollz/progressbar/v3 v3.3.4 @@ -101,24 +80,35 @@ require ( github.com/x-cray/logrus-prefixed-formatter v0.5.2 go.etcd.io/bbolt v1.3.5 go.opencensus.io v0.23.0 - go.uber.org/atomic v1.9.0 // indirect go.uber.org/automaxprocs v1.3.0 - go.uber.org/multierr v1.7.0 // indirect - go.uber.org/zap v1.18.1 // indirect golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 golang.org/x/exp v0.0.0-20200513190911-00229845015e golang.org/x/sync v0.0.0-20210220032951-036812b2e83c - golang.org/x/sys v0.0.0-20211015200801-69063c4bb744 // indirect golang.org/x/tools v0.1.1 - google.golang.org/api v0.34.0 // indirect - google.golang.org/appengine v1.6.7 // indirect google.golang.org/genproto v0.0.0-20210426193834-eac7f76ac494 - google.golang.org/grpc v1.37.0 + google.golang.org/grpc v1.40.0 google.golang.org/protobuf v1.27.1 gopkg.in/d4l3k/messagediff.v1 v1.2.1 gopkg.in/yaml.v2 v2.4.0 - k8s.io/apimachinery v0.18.3 k8s.io/client-go v0.18.3 +) + +require ( + github.com/StackExchange/wmi v0.0.0-20210224194228-fe8f1750fd46 // indirect + github.com/allegro/bigcache v1.2.1 // indirect + github.com/cespare/cp v1.1.1 // indirect + github.com/coreos/go-systemd v0.0.0-20191104093116-d3cd4ed1dbcf // indirect + github.com/deckarep/golang-set v1.7.1 // indirect + github.com/fatih/color v1.9.0 // indirect + github.com/gballet/go-libpcsclite v0.0.0-20191108122812-4678299bea08 // indirect + github.com/go-logr/logr v0.2.1 // indirect + github.com/go-ole/go-ole v1.2.5 // indirect + github.com/peterh/liner v1.2.0 // indirect + github.com/prometheus/tsdb v0.10.0 // indirect + golang.org/x/sys v0.0.0-20211015200801-69063c4bb744 // indirect + google.golang.org/api v0.34.0 // indirect + google.golang.org/appengine v1.6.7 // indirect + k8s.io/apimachinery v0.18.3 k8s.io/klog/v2 v2.3.0 // indirect k8s.io/utils 
v0.0.0-20200520001619-278ece378a50 // indirect ) diff --git a/go.sum b/go.sum index c825bf4173..e538a07ce0 100644 --- a/go.sum +++ b/go.sum @@ -48,6 +48,7 @@ dmitri.shuralyov.com/state v0.0.0-20180228185332-28bcc343414c/go.mod h1:0PRwlb0D git.apache.org/thrift.git v0.0.0-20180902110319-2566ecd5d999/go.mod h1:fPE2ZNJGynbRyZ4dJvy6G277gSllfV2HJqblrnkyeyg= github.com/AndreasBriese/bbloom v0.0.0-20180913140656-343706a395b7/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/AndreasBriese/bbloom v0.0.0-20190306092124-e2d15f34fcf9/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= +github.com/AndreasBriese/bbloom v0.0.0-20190825152654-46b345b51c96/go.mod h1:bOvUY6CB00SOBii9/FifXqc0awNKxLFCL/+pkDPuyl8= github.com/Azure/azure-pipeline-go v0.2.1/go.mod h1:UGSo8XybXnIGZ3epmeBw7Jdz+HiUVpqIlpz/HKHylF4= github.com/Azure/azure-pipeline-go v0.2.2/go.mod h1:4rQ/NZncSvGqNkkOsNpOU1tgoNuIlp9AfUH5G1tvCHc= github.com/Azure/azure-storage-blob-go v0.7.0/go.mod h1:f9YQKtsG1nMisotuTPpO0tjNuEjKRYAcJU8/ydDI++4= @@ -141,13 +142,11 @@ github.com/btcsuite/btcd v0.0.0-20190213025234-306aecffea32/go.mod h1:DrZx5ec/dm github.com/btcsuite/btcd v0.0.0-20190523000118-16327141da8c/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= github.com/btcsuite/btcd v0.0.0-20190824003749-130ea5bddde3/go.mod h1:3J08xEfcugPacsc34/LKRU2yO7YmuT8yt28J8k2+rrI= github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ= -github.com/btcsuite/btcd v0.21.0-beta/go.mod h1:ZSWyehm27aAuS9bvkATT+Xte3hjHZ+MRgMY/8NJ7K94= github.com/btcsuite/btcd v0.22.0-beta h1:LTDpDKUM5EeOFBPM8IXpinEcmZ6FWfNZbE3lfrfdnWo= github.com/btcsuite/btcd v0.22.0-beta/go.mod h1:9n5ntfhhHQBIhUvlhDvD3Qg6fRUj4jkN0VB8L8svzOA= github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA= github.com/btcsuite/btcutil v0.0.0-20190207003914-4c204d697803/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= github.com/btcsuite/btcutil 
v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg= -github.com/btcsuite/btcutil v1.0.2/go.mod h1:j9HUFwoQRsZL3V4n+qG+CUnEGHOarIxfC3Le2Yhbcts= github.com/btcsuite/btcutil v1.0.3-0.20201208143702-a53e38424cce/go.mod h1:0DVlHczLPewLcPGEIeUEzfOJhqGPQ0mJJRDBtD307+o= github.com/btcsuite/go-socks v0.0.0-20170105172521-4720035b7bfd/go.mod h1:HHNXQzUsZCxOoE+CPiyCTO6x34Zs86zZUiwtpXoGdtg= github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVaaLLH7j4eDXPRvw78tMflu7Ie2bzYOH4Y8rRKBY= @@ -182,6 +181,7 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cloudflare/cloudflare-go v0.14.0/go.mod h1:EnwdgGMaFOruiPZRFSgn+TsQ3hQ7C/YWzIGLeu5c304= github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk= +github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs= github.com/cockroachdb/datadriven v0.0.0-20190809214429-80d97fb3cbaa/go.mod h1:zn76sxSg3SzpJ0PPJaLDCu+Bu0Lg3sKTORVIj19EIF8= github.com/codahale/hdrhistogram v0.0.0-20161010025455-3a0bb77429bd/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI= github.com/consensys/bavard v0.1.8-0.20210406032232-f3452dc9b572/go.mod h1:Bpd0/3mZuaj6Sj+PqrmIquiOKy397AKGThQPaGzNXAQ= @@ -229,6 +229,7 @@ github.com/dgraph-io/badger v1.5.5-0.20190226225317-8115aed38f8f/go.mod h1:VZxzA github.com/dgraph-io/badger v1.6.0-rc1/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= github.com/dgraph-io/badger v1.6.0/go.mod h1:zwt7syl517jmP8s94KqSxTlM6IMsdhYy6psNgSztDR4= github.com/dgraph-io/badger v1.6.1/go.mod h1:FRmFw3uxvcpa8zG3Rxs0th+hCLIuaQg8HlNV5bjgnuU= +github.com/dgraph-io/badger v1.6.2/go.mod h1:JW2yswe3V058sS0kZ2h/AXeDSqFjxnZcRrVH//y2UQE= github.com/dgraph-io/ristretto v0.0.2/go.mod 
h1:KPxhHT9ZxKefz+PCeOGsrHpl1qZ7i70dGTu2u+Ahh6E= github.com/dgraph-io/ristretto v0.0.4-0.20210318174700-74754f61e018 h1:cNcG4c2n5xanQzp2hMyxDxPYVQmZ91y4WN6fJFlndLo= github.com/dgraph-io/ristretto v0.0.4-0.20210318174700-74754f61e018/go.mod h1:MIonLggsKgZLUSt414ExgwNtlOL5MuEoAJP514mwGe8= @@ -266,9 +267,10 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98= github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= +github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/ethereum/go-ethereum v1.10.10 h1:Ft2GcLQrr2M89l49g9NoqgNtJZ9AahzMb7N6VXKZy5U= -github.com/ethereum/go-ethereum v1.10.10/go.mod h1:W3yfrFyL9C1pHcwY5hmRHVDaorTiQxhYBkKyu5mEDHw= +github.com/ethereum/go-ethereum v1.10.11-0.20211018203420-b97f57882c14 h1:Mi7op8Vnhq9L2jpczrDzPm6c9XZbvHu0h4hoDq9u7QM= +github.com/ethereum/go-ethereum v1.10.11-0.20211018203420-b97f57882c14/go.mod h1:W3yfrFyL9C1pHcwY5hmRHVDaorTiQxhYBkKyu5mEDHw= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= @@ -337,6 +339,8 @@ github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG github.com/go-stack/stack v1.6.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= github.com/go-stack/stack v1.8.0 h1:5SgMzNM5HxrEjV0ww2lTmX6E2Izsfxas4+YHWRs3Lsk= github.com/go-stack/stack v1.8.0/go.mod 
h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0 h1:p104kn46Q8WdvHunIJ9dAyjPVtrBPhSr3KT2yUst43I= +github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE= github.com/go-yaml/yaml v2.1.0+incompatible h1:RYi2hDdss1u4YE7GwixGzWwVo47T8UQwnTLB6vQiq+o= github.com/go-yaml/yaml v2.1.0+incompatible/go.mod h1:w2MrLa16VYP0jy6N7M5kHaCkaLENm+P+Tv+MfurjSw0= github.com/gofrs/flock v0.8.0/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU= @@ -361,7 +365,6 @@ github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfU github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190129154638-5b532d6fd5ef/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= -github.com/golang/groupcache v0.0.0-20191027212112-611e8accdfc9/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= @@ -442,8 +445,9 @@ github.com/google/uuid v1.0.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/google/uuid v1.1.1/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/google/uuid v1.1.5/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/google/uuid v1.2.0 h1:qJYtXnJRWmpe7m/3XlyhrsLrEURqHRM2kxzoxXqyUDs= github.com/google/uuid v1.2.0/go.mod 
h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= +github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= github.com/googleapis/gax-go v2.0.0+incompatible/go.mod h1:SFVmujtThgffbyetf+mdk2eWhX2bMyUtNHzFKcPA9HY= github.com/googleapis/gax-go/v2 v2.0.3/go.mod h1:LLvjysVCY1JZeum8Z6l8qUty8fiNwE08qbEPm1M08qg= github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg= @@ -475,8 +479,9 @@ github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92Bcuy github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= github.com/grpc-ecosystem/grpc-gateway v1.5.0/go.mod h1:RSKVYQBd5MCa4OVpNdGskqpgL2+G+NZTnrVHpWWfpdw= github.com/grpc-ecosystem/grpc-gateway v1.9.0/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= -github.com/grpc-ecosystem/grpc-gateway v1.9.5 h1:UImYN5qQ8tuGpGE16ZmjvcTtTw24zw1QAp/SlnNrZhI= github.com/grpc-ecosystem/grpc-gateway v1.9.5/go.mod h1:vNeuVxBJEsws4ogUvrchl83t/GYV9WGTSLVdBhOQFDY= +github.com/grpc-ecosystem/grpc-gateway v1.16.0 h1:gmcG1KaJ57LophUzW0Hy8NmPhnMZb4M0+kPpLofRdBo= +github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw= github.com/gxed/hashland/keccakpg v0.0.1/go.mod h1:kRzw3HkwxFU1mpmPP8v1WyQzwdGfmKFJ6tItnhQ67kU= github.com/gxed/hashland/murmur3 v0.0.1/go.mod h1:KjXop02n4/ckmZSnY2+HKcLud/tcmvhST0bie/0lS48= github.com/hashicorp/consul/api v1.1.0/go.mod h1:VmuI/Lkw1nC05EYQWNKwWGbkg+FbDBtguAZLlVdkD9Q= @@ -522,8 +527,6 @@ github.com/huin/goupnp v1.0.0/go.mod h1:n9v9KO1tAxYH82qOn+UTIFQDmx5n1Zxd/ClZDMX7 github.com/huin/goupnp v1.0.2 h1:RfGLP+h3mvisuWEyybxNq5Eft3NWhHLPeUN72kpKZoI= github.com/huin/goupnp v1.0.2/go.mod h1:0dxJBVBHqTMjIUMkESDTNgOOx/Mw5wYIfyFmdzSamkM= github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o= 
-github.com/iancoleman/strcase v0.1.3 h1:dJBk1m2/qjL1twPLf68JND55vvivMupZ4wIzE8CTdBw= -github.com/iancoleman/strcase v0.1.3/go.mod h1:SK73tn/9oHe+/Y0h39VT4UCxmurVJkR5NA7kMEAOgSE= github.com/ianlancetaylor/cgosymbolizer v0.0.0-20200424224625-be1b05b0b279 h1:IpTHAzWv1pKDDWeJDY5VOHvqc2T9d3C8cPKEf2VPqHE= github.com/ianlancetaylor/cgosymbolizer v0.0.0-20200424224625-be1b05b0b279/go.mod h1:a5aratAVTWyz+nJMmDsN8O4XTfaLfdAsB1ysCmZX5Bw= github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc= @@ -558,17 +561,17 @@ github.com/ipfs/go-datastore v0.4.0/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13X github.com/ipfs/go-datastore v0.4.1/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.4/go.mod h1:SX/xMIKoCszPqp+z9JhPYCmoOoXTvaa13XEbGtsFUhA= github.com/ipfs/go-datastore v0.4.5/go.mod h1:eXTcaaiN6uOlVCLS9GjJUJtlvJfM3xk23w3fyfrmmJs= +github.com/ipfs/go-datastore v0.4.6/go.mod h1:XSipLSc64rFKSFRFGo1ecQl+WhYce3K7frtpHkyPFUc= github.com/ipfs/go-detect-race v0.0.1 h1:qX/xay2W3E4Q1U7d9lNs1sU9nvguX0a7319XbyQ6cOk= github.com/ipfs/go-detect-race v0.0.1/go.mod h1:8BNT7shDZPo99Q74BpGMK+4D8Mn4j46UU0LZ723meps= github.com/ipfs/go-ds-badger v0.0.2/go.mod h1:Y3QpeSFWQf6MopLTiZD+VT6IC1yZqaGmjvRcKeSGij8= github.com/ipfs/go-ds-badger v0.0.5/go.mod h1:g5AuuCGmr7efyzQhLL8MzwqcauPojGPUaHzfGTzuE3s= github.com/ipfs/go-ds-badger v0.2.1/go.mod h1:Tx7l3aTph3FMFrRS838dcSJh+jjA7cX9DrGVwx/NOwE= github.com/ipfs/go-ds-badger v0.2.3/go.mod h1:pEYw0rgg3FIrywKKnL+Snr+w/LjJZVMTBRn4FS6UHUk= +github.com/ipfs/go-ds-badger v0.2.7/go.mod h1:02rnztVKA4aZwDuaRPTf8mpqcKmXP7mLl6JPxd14JHA= github.com/ipfs/go-ds-leveldb v0.0.1/go.mod h1:feO8V3kubwsEF22n0YRQCffeb79OOYIykR4L04tMOYc= github.com/ipfs/go-ds-leveldb v0.4.1/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= github.com/ipfs/go-ds-leveldb v0.4.2/go.mod h1:jpbku/YqBSsBc1qgME8BkWS4AxzF2cEu1Ii2r79Hh9s= -github.com/ipfs/go-ipfs-addr v0.0.1 
h1:DpDFybnho9v3/a1dzJ5KnWdThWD1HrFLpQ+tWIyBaFI= -github.com/ipfs/go-ipfs-addr v0.0.1/go.mod h1:uKTDljHT3Q3SUWzDLp3aYUi8MrY32fgNgogsIa0npjg= github.com/ipfs/go-ipfs-delay v0.0.0-20181109222059-70721b86a9a8/go.mod h1:8SP1YXK1M1kXuc4KJZINY3TQQ03J2rwBG9QfXmbRPrw= github.com/ipfs/go-ipfs-util v0.0.1/go.mod h1:spsl5z8KUnrve+73pOhSVZND1SIxPW5RyBCNzQxlJBc= github.com/ipfs/go-ipfs-util v0.0.2 h1:59Sswnk1MFaiq+VcaknX7aYEyGyGDAA73ilhEK2POp8= @@ -641,13 +644,14 @@ github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6 github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.9.8/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A= github.com/klauspost/compress v1.10.1/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= +github.com/klauspost/compress v1.11.7 h1:0hzRabrMN4tSTvMfnL3SCv1ZGeAP23ynzodBgaHeMeg= github.com/klauspost/compress v1.11.7/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs= github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid v1.2.3 h1:CCtW0xUnWGVINKvE/WWOYKdsPV6mawAtvQuSl8guwQs= github.com/klauspost/cpuid v1.2.3/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek= github.com/klauspost/cpuid/v2 v2.0.4/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= -github.com/klauspost/cpuid/v2 v2.0.8 h1:bhR2mgIlno/Sfk4oUbH4sPlc83z1yGrN9bvqiq3C33I= -github.com/klauspost/cpuid/v2 v2.0.8/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= +github.com/klauspost/cpuid/v2 v2.0.9 h1:lgaqFMSdTdQYdZ04uHyN2d/eKdOMyi2YLSvlQIBFYa4= +github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg= github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg= github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= github.com/klauspost/pgzip 
v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs= @@ -675,8 +679,9 @@ github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL github.com/leanovate/gopter v0.2.9/go.mod h1:U2L/78B+KVFIx2VmW6onHJQzXtFb+p5y3y2Sh+Jxxv8= github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo= github.com/libp2p/go-addr-util v0.0.1/go.mod h1:4ac6O7n9rIAKB1dnd+s8IbbMXkt+oBpzX4/+RACcnlQ= -github.com/libp2p/go-addr-util v0.0.2 h1:7cWK5cdA5x72jX0g8iLrQWm5TRJZ6CzGdPEhWj7plWU= github.com/libp2p/go-addr-util v0.0.2/go.mod h1:Ecd6Fb3yIuLzq4bD7VcywcVSBtefcAwnUISBM3WG15E= +github.com/libp2p/go-addr-util v0.1.0 h1:acKsntI33w2bTU7tC9a0SaPimJGfSI0bFKC18ChxeVI= +github.com/libp2p/go-addr-util v0.1.0/go.mod h1:6I3ZYuFr2O/9D+SoyM0zEw0EF3YkldtTX406BpdQMqw= github.com/libp2p/go-buffer-pool v0.0.1/go.mod h1:xtyIz9PMobb13WaxR6Zo1Pd1zXJKYg0a8KiIvDp3TzQ= github.com/libp2p/go-buffer-pool v0.0.2 h1:QNK2iAFa8gjAe1SPz6mHSMuCcjs+X1wlHzeOSqcmlfs= github.com/libp2p/go-buffer-pool v0.0.2/go.mod h1:MvaB6xw5vOrDl8rYZGLFdKAuk/hRoRZd1Vi32+RXyFM= @@ -694,8 +699,8 @@ github.com/libp2p/go-libp2p v0.6.1/go.mod h1:CTFnWXogryAHjXAKEbOf1OWY+VeAP3lDMZk github.com/libp2p/go-libp2p v0.7.0/go.mod h1:hZJf8txWeCduQRDC/WSqBGMxaTHCOYHt2xSU1ivxn0k= github.com/libp2p/go-libp2p v0.7.4/go.mod h1:oXsBlTLF1q7pxr+9w6lqzS1ILpyHsaBPniVO7zIHGMw= github.com/libp2p/go-libp2p v0.8.1/go.mod h1:QRNH9pwdbEBpx5DTJYg+qxcVaDMAz3Ee/qDKwXujH5o= -github.com/libp2p/go-libp2p v0.14.4 h1:QCJE+jGyqxWdrSPuS4jByXCzosgaIg4SJTLCRplJ53w= -github.com/libp2p/go-libp2p v0.14.4/go.mod h1:EIRU0Of4J5S8rkockZM7eJp2S0UrCyi55m2kJVru3rM= +github.com/libp2p/go-libp2p v0.15.1 h1:wSC//fziln3aMTwgF2vOl0v+hTSFfsdr686Fl0uD3ug= +github.com/libp2p/go-libp2p v0.15.1/go.mod h1:93vekOmNoLAcHXUYYEBot0Df/Z6tm46xu9NeCaiKdnM= github.com/libp2p/go-libp2p-autonat v0.1.1/go.mod h1:OXqkeGOY2xJVWKAGV2inNF5aKN/djNA3fdpCWloIudE= github.com/libp2p/go-libp2p-autonat v0.2.0/go.mod h1:DX+9teU4pEEoZUqR1PiMlqliONQdNbfzE1C718tcViI= 
github.com/libp2p/go-libp2p-autonat v0.2.1/go.mod h1:MWtAhV5Ko1l6QBsHQNSuM6b1sRkXrpk0/LqCr+vCVxI= @@ -731,16 +736,12 @@ github.com/libp2p/go-libp2p-core v0.7.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJB github.com/libp2p/go-libp2p-core v0.8.0/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= github.com/libp2p/go-libp2p-core v0.8.1/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= github.com/libp2p/go-libp2p-core v0.8.2/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= -github.com/libp2p/go-libp2p-core v0.8.3/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= -github.com/libp2p/go-libp2p-core v0.8.5/go.mod h1:FfewUH/YpvWbEB+ZY9AQRQ4TAD8sJBt/G1rVvhz5XT8= -github.com/libp2p/go-libp2p-core v0.8.6 h1:3S8g006qG6Tjpj1JdRK2S+TWc2DJQKX/RG9fdLeiLSU= github.com/libp2p/go-libp2p-core v0.8.6/go.mod h1:dgHr0l0hIKfWpGpqAMbpo19pen9wJfdCGv51mTmdpmM= -github.com/libp2p/go-libp2p-crypto v0.0.1/go.mod h1:yJkNyDmO341d5wwXxDUGO0LykUVT72ImHNUqh5D/dBE= -github.com/libp2p/go-libp2p-crypto v0.1.0 h1:k9MFy+o2zGDNGsaoZl0MA3iZ75qXxr9OOoAZF+sD5OQ= +github.com/libp2p/go-libp2p-core v0.9.0 h1:t97Mv0LIBZlP2FXVRNKKVzHJCIjbIWGxYptGId4+htU= +github.com/libp2p/go-libp2p-core v0.9.0/go.mod h1:ESsbz31oC3C1AvMJoGx26RTuCkNhmkSRCqZ0kQtJ2/8= github.com/libp2p/go-libp2p-crypto v0.1.0/go.mod h1:sPUokVISZiy+nNuTTH/TY+leRSxnFj/2GLjtOTW90hI= github.com/libp2p/go-libp2p-discovery v0.2.0/go.mod h1:s4VGaxYMbw4+4+tsoQTqh7wfxg97AEdo4GYBt6BadWg= github.com/libp2p/go-libp2p-discovery v0.3.0/go.mod h1:o03drFnz9BVAZdzC/QUQ+NeQOu38Fu7LJGEOK2gQltw= -github.com/libp2p/go-libp2p-discovery v0.5.0/go.mod h1:+srtPIU9gDaBNu//UHvcdliKBIcr4SfDcm0/PfPJLug= github.com/libp2p/go-libp2p-discovery v0.5.1 h1:CJylx+h2+4+s68GvrM4pGNyfNhOYviWBPtVv5PA7sfo= github.com/libp2p/go-libp2p-discovery v0.5.1/go.mod h1:+srtPIU9gDaBNu//UHvcdliKBIcr4SfDcm0/PfPJLug= github.com/libp2p/go-libp2p-loggables v0.1.0/go.mod h1:EyumB2Y6PrYjr55Q3/tiJ/o3xoDasoRYM7nOzEpoa90= @@ -748,7 +749,6 @@ github.com/libp2p/go-libp2p-mplex v0.2.0/go.mod 
h1:Ejl9IyjvXJ0T9iqUTE1jpYATQ9NM3 github.com/libp2p/go-libp2p-mplex v0.2.1/go.mod h1:SC99Rxs8Vuzrf/6WhmH41kNn13TiYdAWNYHrwImKLnE= github.com/libp2p/go-libp2p-mplex v0.2.2/go.mod h1:74S9eum0tVQdAfFiKxAyKzNdSuLqw5oadDq7+L/FELo= github.com/libp2p/go-libp2p-mplex v0.2.3/go.mod h1:CK3p2+9qH9x+7ER/gWWDYJ3QW5ZxWDkm+dVvjfuG3ek= -github.com/libp2p/go-libp2p-mplex v0.4.0/go.mod h1:yCyWJE2sc6TBTnFpjvLuEJgTSw/u+MamvzILKdX7asw= github.com/libp2p/go-libp2p-mplex v0.4.1 h1:/pyhkP1nLwjG3OM+VuaNJkQT/Pqq73WzB3aDN3Fx1sc= github.com/libp2p/go-libp2p-mplex v0.4.1/go.mod h1:cmy+3GfqfM1PceHTLL7zQzAAYaryDu6iPSC+CIb094g= github.com/libp2p/go-libp2p-nat v0.0.5/go.mod h1:1qubaE5bTZMJE+E/uu2URroMbzdubFz1ChgiN79yKPE= @@ -756,10 +756,8 @@ github.com/libp2p/go-libp2p-nat v0.0.6 h1:wMWis3kYynCbHoyKLPBEMu4YRLltbm8Mk08HGS github.com/libp2p/go-libp2p-nat v0.0.6/go.mod h1:iV59LVhB3IkFvS6S6sauVTSOrNEANnINbI/fkaLimiw= github.com/libp2p/go-libp2p-netutil v0.1.0 h1:zscYDNVEcGxyUpMd0JReUZTrpMfia8PmLKcKF72EAMQ= github.com/libp2p/go-libp2p-netutil v0.1.0/go.mod h1:3Qv/aDqtMLTUyQeundkKsA+YCThNdbQD54k3TqjpbFU= -github.com/libp2p/go-libp2p-noise v0.2.0 h1:wmk5nhB9a2w2RxMOyvsoKjizgJOEaJdfAakr0jN8gds= -github.com/libp2p/go-libp2p-noise v0.2.0/go.mod h1:IEbYhBBzGyvdLBoxxULL/SGbJARhUeqlO8lVSREYu2Q= -github.com/libp2p/go-libp2p-peer v0.0.1/go.mod h1:nXQvOBbwVqoP+T5Y5nCjeH4sP9IX/J0AMzcDUVruVoo= -github.com/libp2p/go-libp2p-peer v0.2.0 h1:EQ8kMjaCUwt/Y5uLgjT8iY2qg0mGUT0N1zUjer50DsY= +github.com/libp2p/go-libp2p-noise v0.2.2 h1:MRt5XGfYziDXIUy2udtMWfPmzZqUDYoC1FZoKnqPzwk= +github.com/libp2p/go-libp2p-noise v0.2.2/go.mod h1:IEbYhBBzGyvdLBoxxULL/SGbJARhUeqlO8lVSREYu2Q= github.com/libp2p/go-libp2p-peer v0.2.0/go.mod h1:RCffaCvUyW2CJmG2gAWVqwePwW7JMgxjsHm7+J5kjWY= github.com/libp2p/go-libp2p-peerstore v0.1.0/go.mod h1:2CeHkQsr8svp4fZ+Oi9ykN1HBb6u0MOvdJ7YIsmcwtY= github.com/libp2p/go-libp2p-peerstore v0.1.3/go.mod h1:BJ9sHlm59/80oSkpWgr1MyY1ciXAXV397W6h1GH/uKI= @@ -767,14 +765,14 @@ github.com/libp2p/go-libp2p-peerstore 
v0.2.0/go.mod h1:N2l3eVIeAitSg3Pi2ipSrJYnq github.com/libp2p/go-libp2p-peerstore v0.2.1/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA= github.com/libp2p/go-libp2p-peerstore v0.2.2/go.mod h1:NQxhNjWxf1d4w6PihR8btWIRjwRLBr4TYKfNgrUkOPA= github.com/libp2p/go-libp2p-peerstore v0.2.6/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= -github.com/libp2p/go-libp2p-peerstore v0.2.7 h1:83JoLxyR9OYTnNfB5vvFqvMUv/xDNa6NoPHnENhBsGw= -github.com/libp2p/go-libp2p-peerstore v0.2.7/go.mod h1:ss/TWTgHZTMpsU/oKVVPQCGuDHItOpf2W8RxAi50P2s= +github.com/libp2p/go-libp2p-peerstore v0.2.8 h1:nJghUlUkFVvyk7ccsM67oFA6kqUkwyCM1G4WPVMCWYA= +github.com/libp2p/go-libp2p-peerstore v0.2.8/go.mod h1:gGiPlXdz7mIHd2vfAsHzBNAMqSDkt2UBFwgcITgw1lA= github.com/libp2p/go-libp2p-pnet v0.2.0 h1:J6htxttBipJujEjz1y0a5+eYoiPcFHhSYHH6na5f0/k= github.com/libp2p/go-libp2p-pnet v0.2.0/go.mod h1:Qqvq6JH/oMZGwqs3N1Fqhv8NVhrdYcO0BW4wssv21LA= -github.com/libp2p/go-libp2p-pubsub v0.5.0 h1:OzcIuCWyJpOrWH0PTOfvxTzqFur4tiXpY5jXC8OxjyE= -github.com/libp2p/go-libp2p-pubsub v0.5.0/go.mod h1:MKnrsQkFgPcrQs1KVmOXy6Uz2RDQ1xO7dQo/P0Ba+ig= -github.com/libp2p/go-libp2p-quic-transport v0.10.0 h1:koDCbWD9CCHwcHZL3/WEvP2A+e/o5/W5L3QS/2SPMA0= -github.com/libp2p/go-libp2p-quic-transport v0.10.0/go.mod h1:RfJbZ8IqXIhxBRm5hqUEJqjiiY8xmEuq3HUDS993MkA= +github.com/libp2p/go-libp2p-pubsub v0.5.6 h1:YkO3gG9J1mQBEMRrM5obiG3JD0L8RcrzIpoeLeiYqH8= +github.com/libp2p/go-libp2p-pubsub v0.5.6/go.mod h1:gVOzwebXVdSMDQBTfH8ACO5EJ4SQrvsHqCmYsCZpD0E= +github.com/libp2p/go-libp2p-quic-transport v0.11.2 h1:p1YQDZRHH4Cv2LPtHubqlQ9ggz4CKng/REZuXZbZMhM= +github.com/libp2p/go-libp2p-quic-transport v0.11.2/go.mod h1:wlanzKtIh6pHrq+0U3p3DY9PJfGqxMgPaGKaK5LifwQ= github.com/libp2p/go-libp2p-secio v0.1.0/go.mod h1:tMJo2w7h3+wN4pgU2LSYeiKPrfqBgkOsdiKK77hE7c8= github.com/libp2p/go-libp2p-secio v0.2.0/go.mod h1:2JdZepB8J5V9mBp79BmwsaPQhRPNN2NrnB2lKQcdy6g= github.com/libp2p/go-libp2p-secio v0.2.1/go.mod h1:cWtZpILJqkqrSkiYcDBh5lA3wbT2Q+hz3rJQq3iftD8= @@ 
-784,9 +782,8 @@ github.com/libp2p/go-libp2p-swarm v0.2.2/go.mod h1:fvmtQ0T1nErXym1/aa1uJEyN7JzaT github.com/libp2p/go-libp2p-swarm v0.2.3/go.mod h1:P2VO/EpxRyDxtChXz/VPVXyTnszHvokHKRhfkEgFKNM= github.com/libp2p/go-libp2p-swarm v0.2.8/go.mod h1:JQKMGSth4SMqonruY0a8yjlPVIkb0mdNSwckW7OYziM= github.com/libp2p/go-libp2p-swarm v0.3.0/go.mod h1:hdv95GWCTmzkgeJpP+GK/9D9puJegb7H57B5hWQR5Kk= -github.com/libp2p/go-libp2p-swarm v0.4.3/go.mod h1:mmxP1pGBSc1Arw4F5DIjcpjFAmsRzA1KADuMtMuCT4g= -github.com/libp2p/go-libp2p-swarm v0.5.0 h1:HIK0z3Eqoo8ugmN8YqWAhD2RORgR+3iNXYG4U2PFd1E= -github.com/libp2p/go-libp2p-swarm v0.5.0/go.mod h1:sU9i6BoHE0Ve5SKz3y9WfKrh8dUat6JknzUehFx8xW4= +github.com/libp2p/go-libp2p-swarm v0.5.3 h1:hsYaD/y6+kZff1o1Mc56NcuwSg80lIphTS/zDk3mO4M= +github.com/libp2p/go-libp2p-swarm v0.5.3/go.mod h1:NBn7eNW2lu568L7Ns9wdFrOhgRlkRnIDg0FLKbuu3i8= github.com/libp2p/go-libp2p-testing v0.0.2/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.3/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= github.com/libp2p/go-libp2p-testing v0.0.4/go.mod h1:gvchhf3FQOtBdr+eFUABet5a4MBLK8jM3V4Zghvmi+E= @@ -798,12 +795,12 @@ github.com/libp2p/go-libp2p-testing v0.4.0/go.mod h1:Q+PFXYoiYFN5CAEG2w3gLPEzotl github.com/libp2p/go-libp2p-testing v0.4.2 h1:IOiA5mMigi+eEjf4J+B7fepDhsjtsoWA9QbsCqbNp5U= github.com/libp2p/go-libp2p-testing v0.4.2/go.mod h1:Q+PFXYoiYFN5CAEG2w3gLPEzotlKsNSbKQ/lImlOWF0= github.com/libp2p/go-libp2p-tls v0.1.3/go.mod h1:wZfuewxOndz5RTnCAxFliGjvYSDA40sKitV4c50uI1M= -github.com/libp2p/go-libp2p-tls v0.1.4-0.20200421131144-8a8ad624a291 h1:Ge/2CYttU7XdkPPqQ7e3TiuMFneLie1rM/UjRxPPGsI= -github.com/libp2p/go-libp2p-tls v0.1.4-0.20200421131144-8a8ad624a291/go.mod h1:j1RjQWh/Ek3CRkHIn9sbVzW++n+yK2AnWtM4kZqlTFY= +github.com/libp2p/go-libp2p-tls v0.2.0 h1:N8i5wPiHudA+02sfW85R2nUbybPm7agjAywZc6pd3xA= +github.com/libp2p/go-libp2p-tls v0.2.0/go.mod h1:twrp2Ci4lE2GYspA1AnlYm+boYjqVruxDKJJj7s6xrc= 
github.com/libp2p/go-libp2p-transport-upgrader v0.1.1/go.mod h1:IEtA6or8JUbsV07qPW4r01GnTenLW4oi3lOPbUMGJJA= github.com/libp2p/go-libp2p-transport-upgrader v0.2.0/go.mod h1:mQcrHj4asu6ArfSoMuyojOdjx73Q47cYD7s5+gZOlns= github.com/libp2p/go-libp2p-transport-upgrader v0.3.0/go.mod h1:i+SKzbRnvXdVbU3D1dwydnTmKRPXiAR/fyvi1dXuL4o= -github.com/libp2p/go-libp2p-transport-upgrader v0.4.2/go.mod h1:NR8ne1VwfreD5VIWIU62Agt/J18ekORFU/j1i2y8zvk= +github.com/libp2p/go-libp2p-transport-upgrader v0.4.3/go.mod h1:bpkldbOWXMrXhpZbSV1mQxTrefOg2Fi+k1ClDSA4ppw= github.com/libp2p/go-libp2p-transport-upgrader v0.4.6 h1:SHt3g0FslnqIkEWF25YOB8UCOCTpGAVvHRWQYJ+veiI= github.com/libp2p/go-libp2p-transport-upgrader v0.4.6/go.mod h1:JE0WQuQdy+uLZ5zOaI3Nw9dWGYJIA7mywEtP2lMvnyk= github.com/libp2p/go-libp2p-yamux v0.2.0/go.mod h1:Db2gU+XfLpm6E4rG5uGCFX6uXA8MEXOxFcRoXUODaK8= @@ -823,7 +820,6 @@ github.com/libp2p/go-mplex v0.0.3/go.mod h1:pK5yMLmOoBR1pNCqDlA2GQrdAVTMkqFalaTW github.com/libp2p/go-mplex v0.1.0/go.mod h1:SXgmdki2kwCUlCCbfGLEgHjC4pFqhTp0ZoV6aiKgxDU= github.com/libp2p/go-mplex v0.1.1/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk= github.com/libp2p/go-mplex v0.1.2/go.mod h1:Xgz2RDCi3co0LeZfgjm4OgUF15+sVR8SRcu3SFXI1lk= -github.com/libp2p/go-mplex v0.2.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ= github.com/libp2p/go-mplex v0.3.0 h1:U1T+vmCYJaEoDJPV1aq31N56hS+lJgb397GsylNSgrU= github.com/libp2p/go-mplex v0.3.0/go.mod h1:0Oy/A9PQlwBytDRp4wSkFnzHYDKcpLot35JQ6msjvYQ= github.com/libp2p/go-msgio v0.0.2/go.mod h1:63lBBgOTDKQL6EWazRMCwXsEeEeK9O2Cd+0+6OOuipQ= @@ -849,7 +845,6 @@ github.com/libp2p/go-reuseport v0.0.2 h1:XSG94b1FJfGA01BUrT82imejHQyTxO4jEWqheyC github.com/libp2p/go-reuseport v0.0.2/go.mod h1:SPD+5RwGC7rcnzngoYC86GjPzjSywuQyMVAheVBD9nQ= github.com/libp2p/go-reuseport-transport v0.0.2/go.mod h1:YkbSDrvjUVDL6b8XqriyA20obEtsW9BLkuOUyQAOCbs= github.com/libp2p/go-reuseport-transport v0.0.3/go.mod h1:Spv+MPft1exxARzP2Sruj2Wb5JSyHNncjf1Oi2dEbzM= 
-github.com/libp2p/go-reuseport-transport v0.0.4/go.mod h1:trPa7r/7TJK/d+0hdBLOCGvpQQVOU74OXbNCIMkufGw= github.com/libp2p/go-reuseport-transport v0.0.5 h1:lJzi+vSYbyJj2faPKLxNGWEIBcaV/uJmyvsUxXy2mLw= github.com/libp2p/go-reuseport-transport v0.0.5/go.mod h1:TC62hhPc8qs5c/RoXDZG6YmjK+/YWUPC0yYmeUecbjc= github.com/libp2p/go-sockaddr v0.0.2/go.mod h1:syPvOmNs24S3dFVGJA1/mrqdeijPxLV2Le3BRLKd68k= @@ -863,13 +858,13 @@ github.com/libp2p/go-stream-muxer-multistream v0.3.0/go.mod h1:yDh8abSIzmZtqtOt6 github.com/libp2p/go-tcp-transport v0.1.0/go.mod h1:oJ8I5VXryj493DEJ7OsBieu8fcg2nHGctwtInJVpipc= github.com/libp2p/go-tcp-transport v0.1.1/go.mod h1:3HzGvLbx6etZjnFlERyakbaYPdfjg2pWP97dFZworkY= github.com/libp2p/go-tcp-transport v0.2.0/go.mod h1:vX2U0CnWimU4h0SGSEsg++AzvBcroCGYw28kh94oLe0= -github.com/libp2p/go-tcp-transport v0.2.4/go.mod h1:9dvr03yqrPyYGIEN6Dy5UvdJZjyPFvl1S/igQ5QD1SU= +github.com/libp2p/go-tcp-transport v0.2.7/go.mod h1:lue9p1b3VmZj1MhhEGB/etmvF/nBQ0X9CW2DutBT3MM= github.com/libp2p/go-tcp-transport v0.2.8 h1:aLjX+Nkz+kIz3uA56WtlGKRSAnKDvnqKmv1qF4EyyE4= github.com/libp2p/go-tcp-transport v0.2.8/go.mod h1:64rSfVidkYPLqbzpcN2IwHY4pmgirp67h++hZ/rcndQ= github.com/libp2p/go-ws-transport v0.2.0/go.mod h1:9BHJz/4Q5A9ludYWKoGCFC5gUElzlHoKzu0yY9p/klM= github.com/libp2p/go-ws-transport v0.3.0/go.mod h1:bpgTJmRZAvVHrgHybCVyqoBmyLQ1fiZuEaBYusP5zsk= -github.com/libp2p/go-ws-transport v0.4.0 h1:9tvtQ9xbws6cA5LvqdE6Ne3vcmGB4f1z9SByggk4s0k= -github.com/libp2p/go-ws-transport v0.4.0/go.mod h1:EcIEKqf/7GDjth6ksuS/6p7R49V4CBY6/E7R/iyhYUA= +github.com/libp2p/go-ws-transport v0.5.0 h1:cO6x4P0v6PfxbKnxmf5cY2Ny4OPDGYkUqNvZzp/zdlo= +github.com/libp2p/go-ws-transport v0.5.0/go.mod h1:I2juo1dNTbl8BKSBYo98XY85kU2xds1iamArLvl8kNg= github.com/libp2p/go-yamux v1.2.2/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= github.com/libp2p/go-yamux v1.3.0/go.mod h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= github.com/libp2p/go-yamux v1.3.3/go.mod 
h1:FGTiPvoV/3DVdgWpX+tM0OW3tsM+W5bSE3gZwqQTcow= @@ -880,12 +875,13 @@ github.com/libp2p/go-yamux v1.4.1 h1:P1Fe9vF4th5JOxxgQvfbOHkrGqIZniTLf+ddhZp8YTI github.com/libp2p/go-yamux v1.4.1/go.mod h1:fr7aVgmdNGJK+N1g+b6DW6VxzbRCjCOejR/hkmpooHE= github.com/libp2p/go-yamux/v2 v2.2.0 h1:RwtpYZ2/wVviZ5+3pjC8qdQ4TKnrak0/E01N1UWoAFU= github.com/libp2p/go-yamux/v2 v2.2.0/go.mod h1:3So6P6TV6r75R9jiBpiIKgU/66lOarCZjqROGxzPpPQ= +github.com/libp2p/zeroconf/v2 v2.1.0/go.mod h1:vtRu3WOBoLRiQ3BhDvIJwvvrRakbTevCVLSr9/Ljess= github.com/lightstep/lightstep-tracer-common/golang/gogo v0.0.0-20190605223551-bc2310a04743/go.mod h1:qklhhLq1aX+mtWk9cPHPzaBjWImj5ULL6C7HFJtXQMM= github.com/lightstep/lightstep-tracer-go v0.18.1/go.mod h1:jlF1pusYV4pidLvZ+XD0UBX0ZE6WURAspgAczcDHrL4= github.com/logrusorgru/aurora v2.0.3+incompatible h1:tOpm7WcpBTn4fjmVfgpQq0EfczGlG91VSDkswnjF5A8= github.com/logrusorgru/aurora v2.0.3+incompatible/go.mod h1:7rIyQOR62GCctdiQpZ/zOJlFyk6y+94wXzv6RNZgaR4= -github.com/lucas-clemente/quic-go v0.19.3 h1:eCDQqvGBB+kCTkA0XrAFtNe81FMa0/fn4QSoeAbmiF4= -github.com/lucas-clemente/quic-go v0.19.3/go.mod h1:ADXpNbTQjq1hIzCpB+y/k5iz4n4z4IwqoLb94Kh5Hu8= +github.com/lucas-clemente/quic-go v0.21.2 h1:8LqqL7nBQFDUINadW0fHV/xSaCQJgmJC0Gv+qUnjd78= +github.com/lucas-clemente/quic-go v0.21.2/go.mod h1:vF5M1XqhBAHgbjKcJOXY3JZz3GP0T3FQhz/uyOUS38Q= github.com/lunixbochs/vtclean v0.0.0-20180621232353-2d01aacdc34a/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= github.com/lunixbochs/vtclean v1.0.0 h1:xu2sLAri4lGiovBDQKxl5mrXyESr3gUr5m5SM5+LVb8= github.com/lunixbochs/vtclean v1.0.0/go.mod h1:pHhQNgMf3btfWnGBVipUOjRYhoOsdGqdm/+2c2E2WMI= @@ -901,10 +897,13 @@ github.com/mailru/easyjson v0.0.0-20190626092158-b2ccc519800e/go.mod h1:C1wdFJiN github.com/manifoldco/promptui v0.7.0 h1:3l11YT8tm9MnwGFQ4kETwkzpAwY2Jt9lCrumCUW4+z4= github.com/manifoldco/promptui v0.7.0/go.mod h1:n4zTdgP0vr0S3w7/O/g98U+e0gwLScEXGwov2nIKuGQ= github.com/marten-seemann/qpack v0.2.1/go.mod 
h1:F7Gl5L1jIgN1D11ucXefiuJS9UMVP2opoCp2jDKb7wc= -github.com/marten-seemann/qtls v0.10.0 h1:ECsuYUKalRL240rRD4Ri33ISb7kAQ3qGDlrrl55b2pc= -github.com/marten-seemann/qtls v0.10.0/go.mod h1:UvMd1oaYDACI99/oZUYLzMCkBXQVT0aGm99sJhbT8hs= -github.com/marten-seemann/qtls-go1-15 v0.1.1 h1:LIH6K34bPVttyXnUWixk0bzH6/N07VxbSabxn5A5gZQ= -github.com/marten-seemann/qtls-go1-15 v0.1.1/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= +github.com/marten-seemann/qtls-go1-15 v0.1.4/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= +github.com/marten-seemann/qtls-go1-15 v0.1.5 h1:Ci4EIUN6Rlb+D6GmLdej/bCQ4nPYNtVXQB+xjiXE1nk= +github.com/marten-seemann/qtls-go1-15 v0.1.5/go.mod h1:GyFwywLKkRt+6mfU99csTEY1joMZz5vmB1WNZH3P81I= +github.com/marten-seemann/qtls-go1-16 v0.1.4 h1:xbHbOGGhrenVtII6Co8akhLEdrawwB2iHl5yhJRpnco= +github.com/marten-seemann/qtls-go1-16 v0.1.4/go.mod h1:gNpI2Ol+lRS3WwSOtIUUtRwZEQMXjYK+dQSBFbethAk= +github.com/marten-seemann/qtls-go1-17 v0.1.0-rc.1 h1:/rpmWuGvceLwwWuaKPdjpR4JJEUH0tq64/I3hvzaNLM= +github.com/marten-seemann/qtls-go1-17 v0.1.0-rc.1/go.mod h1:fz4HIxByo+LlWcreM4CZOYNuz3taBQ8rN2X6FqvaWo8= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd h1:br0buuQ854V8u83wA0rVZ8ttrq5CpaPZdvrK0LP2lOk= github.com/marten-seemann/tcp v0.0.0-20210406111302-dfbc87cc63fd/go.mod h1:QuCEs1Nt24+FYQEqAAncTDPJIuGs+LxK1MCiFL25pMU= github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIGnZBoaSLZ3NAl1VTufbOrXBPvtcy6WiQ= @@ -1007,8 +1006,9 @@ github.com/multiformats/go-multiaddr v0.2.1/go.mod h1:s/Apk6IyxfvMjDafnhJgJ3/46z github.com/multiformats/go-multiaddr v0.2.2/go.mod h1:NtfXiOtHvghW9KojvtySjH5y0u0xW5UouOmQQrn6a3Y= github.com/multiformats/go-multiaddr v0.3.0/go.mod h1:dF9kph9wfJ+3VLAaeBqo9Of8x4fJxp6ggJGteB8HQTI= github.com/multiformats/go-multiaddr v0.3.1/go.mod h1:uPbspcUPd5AfaP6ql3ujFY+QWzmBD8uLLL4bXW0XfGc= -github.com/multiformats/go-multiaddr v0.3.3 h1:vo2OTSAqnENB2rLk79pLtr+uhj+VAzSe3uef5q0lRSs= github.com/multiformats/go-multiaddr 
v0.3.3/go.mod h1:lCKNGP1EQ1eZ35Za2wlqnabm9xQkib3fyB+nZXHLag0= +github.com/multiformats/go-multiaddr v0.4.0 h1:hL/K4ZJhJ5PTw3nwylq9lGU5yArzcAroZmex1ghSEkQ= +github.com/multiformats/go-multiaddr v0.4.0/go.mod h1:YcpyLH8ZPudLxQlemYBPhSm0/oCXAT8Z4mzFpyoPyRc= github.com/multiformats/go-multiaddr-dns v0.0.1/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.0.2/go.mod h1:9kWcqw/Pj6FwxAwW38n/9403szc57zJPs45fmnznu3Q= github.com/multiformats/go-multiaddr-dns v0.2.0/go.mod h1:TJ5pr5bBO7Y1B18djPuRsVkduhQH2YqYSbxWJzYGdK0= @@ -1024,7 +1024,6 @@ github.com/multiformats/go-multiaddr-net v0.1.2/go.mod h1:QsWt3XK/3hwvNxZJp92iMQ github.com/multiformats/go-multiaddr-net v0.1.3/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= github.com/multiformats/go-multiaddr-net v0.1.4/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= github.com/multiformats/go-multiaddr-net v0.1.5/go.mod h1:ilNnaM9HbmVFqsb/qcNysjCu4PVONlrBZpHIrw/qQuA= -github.com/multiformats/go-multiaddr-net v0.2.0 h1:MSXRGN0mFymt6B1yo/6BPnIRpLPEnKgQNvVfCX5VDJk= github.com/multiformats/go-multiaddr-net v0.2.0/go.mod h1:gGdH3UXny6U3cKKYCvpXI5rnK7YaOIEOPVDI9tsJbEA= github.com/multiformats/go-multibase v0.0.1/go.mod h1:bja2MqRZ3ggyXtZSEDKpl0uO/gviWFaSteVbWT51qgs= github.com/multiformats/go-multibase v0.0.3 h1:l/B6bJDQjvQ5G52jw4QGSYeOTZoAwIO77RblWplfIqk= @@ -1063,8 +1062,9 @@ github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OS github.com/neelance/astrewrite v0.0.0-20160511093645-99348263ae86/go.mod h1:kHJEU3ofeGjhHklVoIGuVj85JJwZ6kWPaJwCIxgnFmo= github.com/neelance/sourcemap v0.0.0-20151028013722-8c68805598ab/go.mod h1:Qr6/a/Q4r9LP1IltGz7tA7iOK1WonHEYhu1HRBA7ZiM= github.com/nishanths/predeclared v0.0.0-20200524104333-86fad755b4d3/go.mod h1:nt3d53pc1VYcphSCIaYAJtnPYnr3Zyn8fMq2wvPGPso= -github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78= github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A= 
+github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE= +github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU= github.com/oklog/oklog v0.3.2/go.mod h1:FCV+B7mhrz4o+ueLpx+KqkyXRGMWOYEvfiXtdGtbWGs= github.com/oklog/run v1.0.0/go.mod h1:dlhp/R75TPv97u0XWUtDeV/lRKWPKSdTuV0TZvrmrQA= github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U= @@ -1079,8 +1079,10 @@ github.com/onsi/ginkgo v1.10.1/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+ github.com/onsi/ginkgo v1.11.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= github.com/onsi/ginkgo v1.12.0/go.mod h1:oUhWkIvk5aDxtKvDDuw8gItl8pKl42LzjC9KZE0HfGg= github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= -github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA= github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY= +github.com/onsi/ginkgo v1.16.2/go.mod h1:CObGmKUOKaSC0RjmoAK7tKyn4Azo5P2IWuoMnvwxz1E= +github.com/onsi/ginkgo v1.16.4 h1:29JGrr5oVBm5ulCWet69zQkzWipVXIol6ygQUe/EzNc= +github.com/onsi/ginkgo v1.16.4/go.mod h1:dX+/inL/fNMqNlz0e9LfyB9TswhZpCVdJM/Z6Vvnwo0= github.com/onsi/gomega v0.0.0-20170829124025-dcabb60a477c/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.1/go.mod h1:C1qb7wdrVGGVU+Z6iS04AVkA3Q65CEZX59MT0QO5uiA= github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= @@ -1088,8 +1090,9 @@ github.com/onsi/gomega v1.5.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1Cpa github.com/onsi/gomega v1.7.0/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.9.0/go.mod h1:Ho0h+IUsWyvy1OpqCwxlQ/21gkhVunqlU8fDGcoTdcA= -github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE= github.com/onsi/gomega v1.10.1/go.mod 
h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= +github.com/onsi/gomega v1.13.0 h1:7lLHu94wT9Ij0o6EWWclhu0aOh32VxhkwEJvzuWPeak= +github.com/onsi/gomega v1.13.0/go.mod h1:lRk9szgn8TxENtWd0Tp4c3wjlRfMTMH27I+3Je41yGY= github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk= github.com/openconfig/gnmi v0.0.0-20190823184014-89b2bf29312c/go.mod h1:t+O9It+LKzfOAhKTT5O0ehDix+MTqbtT0T9t+7zzOvc= github.com/openconfig/reference v0.0.0-20190727015836-8dfd928c9696/go.mod h1:ym2A+zigScwkSEb/cVQB0/ZMpU3rqiH6X7WRRsxgOGw= @@ -1169,8 +1172,8 @@ github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB8 github.com/prometheus/common v0.15.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.18.0/go.mod h1:U+gB1OBLb1lF3O42bTCL+FK18tX9Oar16Clt/msog/s= github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc= -github.com/prometheus/common v0.29.0 h1:3jqPBvKT4OHAbje2Ql7KeaaSicDBCxMYwEJU1zRJceE= -github.com/prometheus/common v0.29.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= +github.com/prometheus/common v0.30.0 h1:JEkYlQnpzrzQFxi6gnukFPdQ+ac82oRhzMcIduJu/Ug= +github.com/prometheus/common v0.30.0/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls= github.com/prometheus/procfs v0.0.0-20180725123919-05ee40e3a273/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= github.com/prometheus/procfs v0.0.0-20190117184657-bf6a532e95b1/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= @@ -1181,8 +1184,8 @@ github.com/prometheus/procfs v0.0.10/go.mod h1:7Qr8sr6344vo1JqZ6HhLceV9o3AJ1Ff+G github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.2.0/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU= github.com/prometheus/procfs v0.6.0/go.mod 
h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= -github.com/prometheus/procfs v0.7.0 h1:OQZ41sZU9XkRpzrz8/TD0EldH/Rwbddkdu5wDyUwzfE= -github.com/prometheus/procfs v0.7.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= +github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU= +github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA= github.com/prometheus/prom2json v1.3.0 h1:BlqrtbT9lLH3ZsOVhXPsHzFrApCTKRifB7gjJuypu6Y= github.com/prometheus/prom2json v1.3.0/go.mod h1:rMN7m0ApCowcoDlypBHlkNbp5eJQf/+1isKykIP5ZnM= github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU= @@ -1199,8 +1202,8 @@ github.com/prysmaticlabs/grpc-gateway/v2 v2.3.1-0.20210702154020-550e1cd83ec1 h1 github.com/prysmaticlabs/grpc-gateway/v2 v2.3.1-0.20210702154020-550e1cd83ec1/go.mod h1:IOyTYjcIO0rkmnGBfJTL0NJ11exy/Tc2QEuv7hCXp24= github.com/prysmaticlabs/prombbolt v0.0.0-20210126082820-9b7adba6db7c h1:9PHRCuO/VN0s9k+RmLykho7AjDxblNYI5bYKed16NPU= github.com/prysmaticlabs/prombbolt v0.0.0-20210126082820-9b7adba6db7c/go.mod h1:ZRws458tYHS/Zs936OQ6oCrL+Ict5O4Xpwve1UQ6C9M= -github.com/prysmaticlabs/protoc-gen-go-cast v0.0.0-20210504233148-1e141af6a0a1 h1:k7CCMwN7VooQ7GhfySnaVyI4/9+QbhJTdasoC6VOZOI= -github.com/prysmaticlabs/protoc-gen-go-cast v0.0.0-20210504233148-1e141af6a0a1/go.mod h1:au9l1XcWNEKixIlSRzEe54fYGhyELWgJJIxKu8W75Mc= +github.com/prysmaticlabs/protoc-gen-go-cast v0.0.0-20211014160335-757fae4f38c6 h1:+jhXLjEYVW4qU2z5SOxlxN+Hv/A9FDf0HpfDurfMEz0= +github.com/prysmaticlabs/protoc-gen-go-cast v0.0.0-20211014160335-757fae4f38c6/go.mod h1:ZVEbRdnMkGhp/pu35zq4SXxtvUwWK0J1MATtekZpH2Y= github.com/r3labs/sse v0.0.0-20210224172625-26fe804710bc h1:zAsgcP8MhzAbhMnB1QQ2O7ZhWYVGYSR2iVcjzQuPV+o= github.com/r3labs/sse v0.0.0-20210224172625-26fe804710bc/go.mod h1:S8xSOnV3CgpNrWd0GQ/OoQfMtlg2uPRSuTzcSGrzwK8= github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a/go.mod 
h1:bCqnVzQkZxMG4s8nGwiZ5l3QUCyqpo9Y+/ZMZ9VjZe4= @@ -1396,6 +1399,7 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw= go.opencensus.io v0.22.6/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M= go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E= +go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI= go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE= go.uber.org/atomic v1.5.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= @@ -1421,8 +1425,8 @@ go.uber.org/zap v1.13.0/go.mod h1:zwrFLgMcdUuIBviXEYEH1YKNaOBnKXsx2IPda5bBwHM= go.uber.org/zap v1.14.1/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.15.0/go.mod h1:Mb2vm2krFEG5DV0W9qcHBYFtp/Wku1cvYaqPsS/WYfc= go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= -go.uber.org/zap v1.18.1 h1:CSUJ2mjFszzEWt4CdKISEuChVIXGBn3lAPwkRGyVrc4= -go.uber.org/zap v1.18.1/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= +go.uber.org/zap v1.19.0 h1:mZQZefskPPCMIBCSEH0v2/iUqqLrYtaeqwD6FUGUnFE= +go.uber.org/zap v1.19.0/go.mod h1:xg/QME4nWcxGxrpdeYfq7UvYrLh66cuVKdrbD1XF/NI= go4.org v0.0.0-20180809161055-417644f6feb5/go.mod h1:MkTOUMDaeVYJUOUsaDXIhWPZYa1yOyC1qaOBpL57BhE= golang.org/x/build v0.0.0-20190111050920-041ab4dc3f9d/go.mod h1:OWs+y06UdEOHN4y+MfF/py+xQ/tYqIWW03b70/CG9Rw= golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -1455,6 +1459,8 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod 
h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= +golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= +golang.org/x/crypto v0.0.0-20210813211128-0a44fdfbc16e/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg= golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1555,9 +1561,11 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc= golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM= golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= +golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d h1:20cMwl2fHAzkJMEA+8J4JgqBQcQGzbisXo31MIeenXI= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= +golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20170912212905-13449ad91cb2/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 
v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1655,6 +1663,7 @@ golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20201214210602-f9fddec55a1e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20210112080510-489259a85091/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210303074136-134d130e1a04/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= @@ -1669,6 +1678,7 @@ golang.org/x/sys v0.0.0-20210420205809-ac73e9fd8988/go.mod h1:h1NjWce9XRLGQEsW7w golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210426080607-c94f62235c83/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20210511113859-b0526f3d8744/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= @@ -1685,8 +1695,9 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text 
v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M= golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= +golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk= +golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/time v0.0.0-20170424234030-8be79e1e0910/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= @@ -1756,6 +1767,7 @@ golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA= golang.org/x/tools v0.0.0-20200904185747-39188db58858/go.mod h1:Cj7w3i3Rnn0Xh82ur9kSqwfTHTeVxaDqrfMjpcNT6bE= +golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0= golang.org/x/tools v0.1.1 h1:wGiQel/hW0NnEkJUk8lbzkX2gFJU6PFxf1v5OlCfuOs= @@ -1838,6 +1850,7 @@ google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfG google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod 
h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= +google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c= google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA= @@ -1870,11 +1883,14 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak= +google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0= google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc= google.golang.org/grpc v1.35.0-dev.0.20201218190559-666aea1fb34c/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= +google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU= -google.golang.org/grpc v1.37.0 h1:uSZWeQJX5j11bIQ4AJoj+McDBo29cY1MCoC1wO3ts+c= google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM= +google.golang.org/grpc v1.40.0 h1:AGJ0Ih4mHjSeibYkFGh1dD9KJ/eOtZ93I6hoHhukQ5Q= +google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34= google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.0.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod 
h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= diff --git a/third_party/libp2p_tls.patch b/third_party/libp2p_tls.patch deleted file mode 100644 index 63fafa0b86..0000000000 --- a/third_party/libp2p_tls.patch +++ /dev/null @@ -1,16 +0,0 @@ -diff --git a/crypto.go b/crypto.go -index e6d6d5f..f9eb389 100644 ---- a/crypto.go -+++ b/crypto.go -@@ -217,10 +217,7 @@ func preferServerCipherSuites() bool { - var ( - hasGCMAsmAMD64 = cpu.X86.HasAES && cpu.X86.HasPCLMULQDQ - hasGCMAsmARM64 = cpu.ARM64.HasAES && cpu.ARM64.HasPMULL -- // Keep in sync with crypto/aes/cipher_s390x.go. -- hasGCMAsmS390X = cpu.S390X.HasAES && cpu.S390X.HasAESCBC && cpu.S390X.HasAESCTR && (cpu.S390X.HasGHASH || cpu.S390X.HasAESGCM) -- -- hasGCMAsm = hasGCMAsmAMD64 || hasGCMAsmARM64 || hasGCMAsmS390X -+ hasGCMAsm = hasGCMAsmAMD64 || hasGCMAsmARM64 - ) - return !hasGCMAsm - } diff --git a/validator/client/multiple_endpoints_grpc_resolver.go b/validator/client/multiple_endpoints_grpc_resolver.go index ddcf432e4f..75899c0774 100644 --- a/validator/client/multiple_endpoints_grpc_resolver.go +++ b/validator/client/multiple_endpoints_grpc_resolver.go @@ -40,7 +40,9 @@ func (r *multipleEndpointsGrpcResolver) start() { for _, endpoint := range endpoints { addrs = append(addrs, resolver.Address{Addr: endpoint}) } - r.cc.UpdateState(resolver.State{Addresses: addrs}) + if err := r.cc.UpdateState(resolver.State{Addresses: addrs}); err != nil { + log.WithError(err).Error("Failed to update grpc connection state") + } } // ResolveNow -- From 236a5c4167b7d4b2429536b64ae2b6a20dc59ffe Mon Sep 17 00:00:00 2001 From: Nishant Das Date: Thu, 2 Dec 2021 02:56:07 +0800 Subject: [PATCH 41/45] Cleanup From Deepsource (#9961) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * ds cleanup * fix Co-authored-by: RadosÅ‚aw Kapka Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com> --- .../core/blocks/block_operations_fuzz_test.go | 10 +++--- 
.../core/helpers/rewards_penalties.go | 2 +- beacon-chain/db/slasherkv/pruning.go | 4 +-- beacon-chain/db/slasherkv/slasher.go | 2 +- beacon-chain/operations/slashings/mock.go | 4 +-- .../p2p/gossip_scoring_params_test.go | 2 +- .../rpc/prysm/v1alpha1/beacon/blocks.go | 32 +++++++++---------- beacon-chain/slasher/mock_slashing_checker.go | 4 +-- beacon-chain/state/v1/unsupported_setters.go | 6 ++-- beacon-chain/state/v2/deprecated_setters.go | 6 ++-- beacon-chain/state/v3/deprecated_setters.go | 6 ++-- beacon-chain/state/v3/state_trie.go | 4 +-- .../sync/initial-sync/blocks_fetcher_test.go | 10 +++--- tools/deployContract/deployContract.go | 3 +- validator/keymanager/imported/import.go | 6 ++-- validator/rpc/wallet.go | 2 +- .../slashing-protection-history/import.go | 4 +-- validator/testing/mock_slasher.go | 2 +- 18 files changed, 52 insertions(+), 57 deletions(-) diff --git a/beacon-chain/core/blocks/block_operations_fuzz_test.go b/beacon-chain/core/blocks/block_operations_fuzz_test.go index 6e18710e44..e2b76fbe21 100644 --- a/beacon-chain/core/blocks/block_operations_fuzz_test.go +++ b/beacon-chain/core/blocks/block_operations_fuzz_test.go @@ -47,7 +47,7 @@ func TestFuzzProcessBlockHeader_10000(t *testing.T) { } } -func TestFuzzverifyDepositDataSigningRoot_10000(t *testing.T) { +func TestFuzzverifyDepositDataSigningRoot_10000(_ *testing.T) { fuzzer := fuzz.NewWithSeed(0) var ba []byte pubkey := [48]byte{} @@ -85,7 +85,7 @@ func TestFuzzProcessEth1DataInBlock_10000(t *testing.T) { } } -func TestFuzzareEth1DataEqual_10000(t *testing.T) { +func TestFuzzareEth1DataEqual_10000(_ *testing.T) { fuzzer := fuzz.NewWithSeed(0) eth1data := ð.Eth1Data{} eth1data2 := ð.Eth1Data{} @@ -227,7 +227,7 @@ func TestFuzzVerifyAttesterSlashing_10000(t *testing.T) { } } -func TestFuzzIsSlashableAttestationData_10000(t *testing.T) { +func TestFuzzIsSlashableAttestationData_10000(_ *testing.T) { fuzzer := fuzz.NewWithSeed(0) attestationData := ð.AttestationData{} attestationData2 := 
ð.AttestationData{} @@ -239,7 +239,7 @@ func TestFuzzIsSlashableAttestationData_10000(t *testing.T) { } } -func TestFuzzslashableAttesterIndices_10000(t *testing.T) { +func TestFuzzslashableAttesterIndices_10000(_ *testing.T) { fuzzer := fuzz.NewWithSeed(0) attesterSlashing := ð.AttesterSlashing{} @@ -397,7 +397,7 @@ func TestFuzzProcessVoluntaryExitsNoVerify_10000(t *testing.T) { } } -func TestFuzzVerifyExit_10000(t *testing.T) { +func TestFuzzVerifyExit_10000(_ *testing.T) { fuzzer := fuzz.NewWithSeed(0) ve := ð.SignedVoluntaryExit{} rawVal := ðpb.Validator{} diff --git a/beacon-chain/core/helpers/rewards_penalties.go b/beacon-chain/core/helpers/rewards_penalties.go index d7b9b2afd0..2c169dddd4 100644 --- a/beacon-chain/core/helpers/rewards_penalties.go +++ b/beacon-chain/core/helpers/rewards_penalties.go @@ -59,7 +59,7 @@ func TotalActiveBalance(s state.ReadOnlyBeaconState) (uint64, error) { case err == nil: return bal, nil case errors.Is(err, cache.ErrNotFound): - break + // Do nothing if we receive a not found error. default: // In the event, we encounter another error we return it. return 0, err diff --git a/beacon-chain/db/slasherkv/pruning.go b/beacon-chain/db/slasherkv/pruning.go index 27cf1eca6e..1a6be7ffcc 100644 --- a/beacon-chain/db/slasherkv/pruning.go +++ b/beacon-chain/db/slasherkv/pruning.go @@ -14,7 +14,7 @@ import ( // PruneAttestationsAtEpoch deletes all attestations from the slasher DB with target epoch // less than or equal to the specified epoch. func (s *Store) PruneAttestationsAtEpoch( - ctx context.Context, maxEpoch types.Epoch, + _ context.Context, maxEpoch types.Epoch, ) (numPruned uint, err error) { // We can prune everything less than the current epoch - history length. encodedEndPruneEpoch := fssz.MarshalUint64([]byte{}, uint64(maxEpoch)) @@ -85,7 +85,7 @@ func (s *Store) PruneAttestationsAtEpoch( // PruneProposalsAtEpoch deletes all proposals from the slasher DB with epoch // less than or equal to the specified epoch. 
func (s *Store) PruneProposalsAtEpoch( - ctx context.Context, maxEpoch types.Epoch, + _ context.Context, maxEpoch types.Epoch, ) (numPruned uint, err error) { var endPruneSlot types.Slot endPruneSlot, err = slots.EpochEnd(maxEpoch) diff --git a/beacon-chain/db/slasherkv/slasher.go b/beacon-chain/db/slasherkv/slasher.go index b380086b73..5a48823dc5 100644 --- a/beacon-chain/db/slasherkv/slasher.go +++ b/beacon-chain/db/slasherkv/slasher.go @@ -394,7 +394,7 @@ func (s *Store) SaveBlockProposals( // HighestAttestations retrieves the last attestation data from the database for all indices. func (s *Store) HighestAttestations( - ctx context.Context, + _ context.Context, indices []types.ValidatorIndex, ) ([]*slashpb.HighestAttestation, error) { if len(indices) == 0 { diff --git a/beacon-chain/operations/slashings/mock.go b/beacon-chain/operations/slashings/mock.go index 47046ce83a..b5042e2c11 100644 --- a/beacon-chain/operations/slashings/mock.go +++ b/beacon-chain/operations/slashings/mock.go @@ -36,11 +36,11 @@ func (m *PoolMock) InsertProposerSlashing(_ context.Context, _ state.ReadOnlyBea } // MarkIncludedAttesterSlashing -- -func (_ *PoolMock) MarkIncludedAttesterSlashing(_ *ethpb.AttesterSlashing) { +func (*PoolMock) MarkIncludedAttesterSlashing(_ *ethpb.AttesterSlashing) { panic("implement me") } // MarkIncludedProposerSlashing -- -func (_ *PoolMock) MarkIncludedProposerSlashing(_ *ethpb.ProposerSlashing) { +func (*PoolMock) MarkIncludedProposerSlashing(_ *ethpb.ProposerSlashing) { panic("implement me") } diff --git a/beacon-chain/p2p/gossip_scoring_params_test.go b/beacon-chain/p2p/gossip_scoring_params_test.go index e8ac1a721f..c66824198f 100644 --- a/beacon-chain/p2p/gossip_scoring_params_test.go +++ b/beacon-chain/p2p/gossip_scoring_params_test.go @@ -63,7 +63,7 @@ func TestCorrect_ActiveValidatorsCount(t *testing.T) { assert.Equal(t, int(params.BeaconConfig().MinGenesisActiveValidatorCount)+100, int(vals), "mainnet genesis active count isn't accurate") } 
-func TestLoggingParameters(t *testing.T) { +func TestLoggingParameters(_ *testing.T) { logGossipParameters("testing", nil) logGossipParameters("testing", &pubsub.TopicScoreParams{}) // Test out actual gossip parameters. diff --git a/beacon-chain/rpc/prysm/v1alpha1/beacon/blocks.go b/beacon-chain/rpc/prysm/v1alpha1/beacon/blocks.go index 78356e86ae..81cbc271c3 100644 --- a/beacon-chain/rpc/prysm/v1alpha1/beacon/blocks.go +++ b/beacon-chain/rpc/prysm/v1alpha1/beacon/blocks.go @@ -48,7 +48,7 @@ func (bs *Server) ListBlocks( switch q := req.QueryFilter.(type) { case *ethpb.ListBlocksRequest_Epoch: - ctrs, numBlks, nextPageToken, err := bs.ListBlocksForEpoch(ctx, req, q) + ctrs, numBlks, nextPageToken, err := bs.listBlocksForEpoch(ctx, req, q) if err != nil { return nil, err } @@ -63,7 +63,7 @@ func (bs *Server) ListBlocks( NextPageToken: nextPageToken, }, nil case *ethpb.ListBlocksRequest_Root: - ctrs, numBlks, nextPageToken, err := bs.ListBlocksForRoot(ctx, req, q) + ctrs, numBlks, nextPageToken, err := bs.listBlocksForRoot(ctx, req, q) if err != nil { return nil, err } @@ -79,7 +79,7 @@ func (bs *Server) ListBlocks( }, nil case *ethpb.ListBlocksRequest_Slot: - ctrs, numBlks, nextPageToken, err := bs.ListBlocksForSlot(ctx, req, q) + ctrs, numBlks, nextPageToken, err := bs.listBlocksForSlot(ctx, req, q) if err != nil { return nil, err } @@ -94,7 +94,7 @@ func (bs *Server) ListBlocks( NextPageToken: nextPageToken, }, nil case *ethpb.ListBlocksRequest_Genesis: - ctrs, numBlks, nextPageToken, err := bs.ListBlocksForGenesis(ctx, req, q) + ctrs, numBlks, nextPageToken, err := bs.listBlocksForGenesis(ctx, req, q) if err != nil { return nil, err } @@ -128,7 +128,7 @@ func (bs *Server) ListBeaconBlocks( switch q := req.QueryFilter.(type) { case *ethpb.ListBlocksRequest_Epoch: - ctrs, numBlks, nextPageToken, err := bs.ListBlocksForEpoch(ctx, req, q) + ctrs, numBlks, nextPageToken, err := bs.listBlocksForEpoch(ctx, req, q) if err != nil { return nil, err } @@ -142,7 +142,7 @@ 
func (bs *Server) ListBeaconBlocks( NextPageToken: nextPageToken, }, nil case *ethpb.ListBlocksRequest_Root: - ctrs, numBlks, nextPageToken, err := bs.ListBlocksForRoot(ctx, req, q) + ctrs, numBlks, nextPageToken, err := bs.listBlocksForRoot(ctx, req, q) if err != nil { return nil, err } @@ -157,7 +157,7 @@ func (bs *Server) ListBeaconBlocks( }, nil case *ethpb.ListBlocksRequest_Slot: - ctrs, numBlks, nextPageToken, err := bs.ListBlocksForSlot(ctx, req, q) + ctrs, numBlks, nextPageToken, err := bs.listBlocksForSlot(ctx, req, q) if err != nil { return nil, err } @@ -171,7 +171,7 @@ func (bs *Server) ListBeaconBlocks( NextPageToken: nextPageToken, }, nil case *ethpb.ListBlocksRequest_Genesis: - ctrs, numBlks, nextPageToken, err := bs.ListBlocksForGenesis(ctx, req, q) + ctrs, numBlks, nextPageToken, err := bs.listBlocksForGenesis(ctx, req, q) if err != nil { return nil, err } @@ -224,8 +224,8 @@ func convertToBlockContainer(blk block.SignedBeaconBlock, root [32]byte, isCanon return ctr, nil } -// ListBlocksForEpoch retrieves all blocks for the provided epoch. -func (bs *Server) ListBlocksForEpoch(ctx context.Context, req *ethpb.ListBlocksRequest, q *ethpb.ListBlocksRequest_Epoch) ([]blockContainer, int, string, error) { +// listBlocksForEpoch retrieves all blocks for the provided epoch. +func (bs *Server) listBlocksForEpoch(ctx context.Context, req *ethpb.ListBlocksRequest, q *ethpb.ListBlocksRequest_Epoch) ([]blockContainer, int, string, error) { blks, _, err := bs.BeaconDB.Blocks(ctx, filters.NewFilter().SetStartEpoch(q.Epoch).SetEndEpoch(q.Epoch)) if err != nil { return nil, 0, strconv.Itoa(0), status.Errorf(codes.Internal, "Could not get blocks: %v", err) @@ -262,8 +262,8 @@ func (bs *Server) ListBlocksForEpoch(ctx context.Context, req *ethpb.ListBlocksR return containers, numBlks, nextPageToken, nil } -// ListBlocksForRoot retrieves the block for the provided root. 
-func (bs *Server) ListBlocksForRoot(ctx context.Context, _ *ethpb.ListBlocksRequest, q *ethpb.ListBlocksRequest_Root) ([]blockContainer, int, string, error) { +// listBlocksForRoot retrieves the block for the provided root. +func (bs *Server) listBlocksForRoot(ctx context.Context, _ *ethpb.ListBlocksRequest, q *ethpb.ListBlocksRequest_Root) ([]blockContainer, int, string, error) { blk, err := bs.BeaconDB.Block(ctx, bytesutil.ToBytes32(q.Root)) if err != nil { return nil, 0, strconv.Itoa(0), status.Errorf(codes.Internal, "Could not retrieve block: %v", err) @@ -286,8 +286,8 @@ func (bs *Server) ListBlocksForRoot(ctx context.Context, _ *ethpb.ListBlocksRequ }}, 1, strconv.Itoa(0), nil } -// ListBlocksForSlot retrieves all blocks for the provided slot. -func (bs *Server) ListBlocksForSlot(ctx context.Context, req *ethpb.ListBlocksRequest, q *ethpb.ListBlocksRequest_Slot) ([]blockContainer, int, string, error) { +// listBlocksForSlot retrieves all blocks for the provided slot. +func (bs *Server) listBlocksForSlot(ctx context.Context, req *ethpb.ListBlocksRequest, q *ethpb.ListBlocksRequest_Slot) ([]blockContainer, int, string, error) { hasBlocks, blks, err := bs.BeaconDB.BlocksBySlot(ctx, q.Slot) if err != nil { return nil, 0, strconv.Itoa(0), status.Errorf(codes.Internal, "Could not retrieve blocks for slot %d: %v", q.Slot, err) @@ -323,8 +323,8 @@ func (bs *Server) ListBlocksForSlot(ctx context.Context, req *ethpb.ListBlocksRe return containers, numBlks, nextPageToken, nil } -// ListBlocksForGenesis retrieves the genesis block. -func (bs *Server) ListBlocksForGenesis(ctx context.Context, _ *ethpb.ListBlocksRequest, _ *ethpb.ListBlocksRequest_Genesis) ([]blockContainer, int, string, error) { +// listBlocksForGenesis retrieves the genesis block. 
+func (bs *Server) listBlocksForGenesis(ctx context.Context, _ *ethpb.ListBlocksRequest, _ *ethpb.ListBlocksRequest_Genesis) ([]blockContainer, int, string, error) { genBlk, err := bs.BeaconDB.GenesisBlock(ctx) if err != nil { return nil, 0, strconv.Itoa(0), status.Errorf(codes.Internal, "Could not retrieve blocks for genesis slot: %v", err) diff --git a/beacon-chain/slasher/mock_slashing_checker.go b/beacon-chain/slasher/mock_slashing_checker.go index 43efdcab35..cbec22ae40 100644 --- a/beacon-chain/slasher/mock_slashing_checker.go +++ b/beacon-chain/slasher/mock_slashing_checker.go @@ -28,7 +28,7 @@ func (s *MockSlashingChecker) HighestAttestations( return atts, nil } -func (s *MockSlashingChecker) IsSlashableBlock(ctx context.Context, proposal *ethpb.SignedBeaconBlockHeader) (*ethpb.ProposerSlashing, error) { +func (s *MockSlashingChecker) IsSlashableBlock(_ context.Context, _ *ethpb.SignedBeaconBlockHeader) (*ethpb.ProposerSlashing, error) { if s.ProposerSlashingFound { return ðpb.ProposerSlashing{ Header_1: ðpb.SignedBeaconBlockHeader{ @@ -56,7 +56,7 @@ func (s *MockSlashingChecker) IsSlashableBlock(ctx context.Context, proposal *et return nil, nil } -func (s *MockSlashingChecker) IsSlashableAttestation(ctx context.Context, attestation *ethpb.IndexedAttestation) ([]*ethpb.AttesterSlashing, error) { +func (s *MockSlashingChecker) IsSlashableAttestation(_ context.Context, _ *ethpb.IndexedAttestation) ([]*ethpb.AttesterSlashing, error) { if s.AttesterSlashingFound { return []*ethpb.AttesterSlashing{ { diff --git a/beacon-chain/state/v1/unsupported_setters.go b/beacon-chain/state/v1/unsupported_setters.go index c4e3b4a63d..40cfeeab6a 100644 --- a/beacon-chain/state/v1/unsupported_setters.go +++ b/beacon-chain/state/v1/unsupported_setters.go @@ -6,17 +6,17 @@ import ( ) // AppendCurrentParticipationBits is not supported for phase 0 beacon state. 
-func (b *BeaconState) AppendCurrentParticipationBits(val byte) error { +func (*BeaconState) AppendCurrentParticipationBits(_ byte) error { return errors.New("AppendCurrentParticipationBits is not supported for phase 0 beacon state") } // AppendPreviousParticipationBits is not supported for phase 0 beacon state. -func (b *BeaconState) AppendPreviousParticipationBits(val byte) error { +func (*BeaconState) AppendPreviousParticipationBits(_ byte) error { return errors.New("AppendPreviousParticipationBits is not supported for phase 0 beacon state") } // AppendInactivityScore is not supported for phase 0 beacon state. -func (b *BeaconState) AppendInactivityScore(s uint64) error { +func (*BeaconState) AppendInactivityScore(_ uint64) error { return errors.New("AppendInactivityScore is not supported for phase 0 beacon state") } diff --git a/beacon-chain/state/v2/deprecated_setters.go b/beacon-chain/state/v2/deprecated_setters.go index 7125a7cd2c..69dfce48d0 100644 --- a/beacon-chain/state/v2/deprecated_setters.go +++ b/beacon-chain/state/v2/deprecated_setters.go @@ -6,17 +6,17 @@ import ( ) // SetPreviousEpochAttestations is not supported for HF1 beacon state. -func (b *BeaconState) SetPreviousEpochAttestations(val []*ethpb.PendingAttestation) error { +func (b *BeaconState) SetPreviousEpochAttestations(_ []*ethpb.PendingAttestation) error { return errors.New("SetPreviousEpochAttestations is not supported for hard fork 1 beacon state") } // SetCurrentEpochAttestations is not supported for HF1 beacon state. -func (b *BeaconState) SetCurrentEpochAttestations(val []*ethpb.PendingAttestation) error { +func (b *BeaconState) SetCurrentEpochAttestations(_ []*ethpb.PendingAttestation) error { return errors.New("SetCurrentEpochAttestations is not supported for hard fork 1 beacon state") } // AppendCurrentEpochAttestations is not supported for HF1 beacon state. 
-func (b *BeaconState) AppendCurrentEpochAttestations(val *ethpb.PendingAttestation) error { +func (b *BeaconState) AppendCurrentEpochAttestations(_ *ethpb.PendingAttestation) error { return errors.New("AppendCurrentEpochAttestations is not supported for hard fork 1 beacon state") } diff --git a/beacon-chain/state/v3/deprecated_setters.go b/beacon-chain/state/v3/deprecated_setters.go index e99b23993d..e4d9a24b1c 100644 --- a/beacon-chain/state/v3/deprecated_setters.go +++ b/beacon-chain/state/v3/deprecated_setters.go @@ -6,17 +6,17 @@ import ( ) // SetPreviousEpochAttestations is not supported for HF1 beacon state. -func (b *BeaconState) SetPreviousEpochAttestations(val []*ethpb.PendingAttestation) error { +func (*BeaconState) SetPreviousEpochAttestations(_ []*ethpb.PendingAttestation) error { return errors.New("SetPreviousEpochAttestations is not supported for version Merge beacon state") } // SetCurrentEpochAttestations is not supported for HF1 beacon state. -func (b *BeaconState) SetCurrentEpochAttestations(val []*ethpb.PendingAttestation) error { +func (*BeaconState) SetCurrentEpochAttestations(_ []*ethpb.PendingAttestation) error { return errors.New("SetCurrentEpochAttestations is not supported for version Merge beacon state") } // AppendCurrentEpochAttestations is not supported for HF1 beacon state. 
-func (b *BeaconState) AppendCurrentEpochAttestations(val *ethpb.PendingAttestation) error { +func (*BeaconState) AppendCurrentEpochAttestations(_ *ethpb.PendingAttestation) error { return errors.New("AppendCurrentEpochAttestations is not supported for version Merge beacon state") } diff --git a/beacon-chain/state/v3/state_trie.go b/beacon-chain/state/v3/state_trie.go index c34571f85f..08604d74cc 100644 --- a/beacon-chain/state/v3/state_trie.go +++ b/beacon-chain/state/v3/state_trie.go @@ -219,7 +219,7 @@ func (b *BeaconState) HashTreeRoot(ctx context.Context) ([32]byte, error) { } for field := range b.dirtyFields { - root, err := b.rootSelector(ctx, field) + root, err := b.rootSelector(field) if err != nil { return [32]byte{}, err } @@ -256,7 +256,7 @@ func (b *BeaconState) IsNil() bool { return b == nil || b.state == nil } -func (b *BeaconState) rootSelector(ctx context.Context, field types.FieldIndex) ([32]byte, error) { +func (b *BeaconState) rootSelector(field types.FieldIndex) ([32]byte, error) { hasher := hash.CustomSHA256Hasher() switch field { case genesisTime: diff --git a/beacon-chain/sync/initial-sync/blocks_fetcher_test.go b/beacon-chain/sync/initial-sync/blocks_fetcher_test.go index 896d41372e..ed382b5869 100644 --- a/beacon-chain/sync/initial-sync/blocks_fetcher_test.go +++ b/beacon-chain/sync/initial-sync/blocks_fetcher_test.go @@ -717,10 +717,9 @@ func TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T) blk.Block.Slot = req.StartSlot - 1 assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wrapper.WrappedPhase0SignedBeaconBlock(blk))) break - } else { - blk.Block.Slot = i - assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wrapper.WrappedPhase0SignedBeaconBlock(blk))) } + blk.Block.Slot = i + assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wrapper.WrappedPhase0SignedBeaconBlock(blk))) } } }, @@ -749,10 +748,9 @@ func 
TestBlocksFetcher_requestBlocksFromPeerReturningInvalidBlocks(t *testing.T) blk.Block.Slot = req.StartSlot.Add(req.Count * req.Step) assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wrapper.WrappedPhase0SignedBeaconBlock(blk))) break - } else { - blk.Block.Slot = i - assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wrapper.WrappedPhase0SignedBeaconBlock(blk))) } + blk.Block.Slot = i + assert.NoError(t, beaconsync.WriteBlockChunk(stream, chain, p1.Encoding(), wrapper.WrappedPhase0SignedBeaconBlock(blk))) } } }, diff --git a/tools/deployContract/deployContract.go b/tools/deployContract/deployContract.go index f86733a2d5..05bfc1e950 100644 --- a/tools/deployContract/deployContract.go +++ b/tools/deployContract/deployContract.go @@ -174,9 +174,8 @@ func main() { if k8sConfigMapName != "" { if err := updateKubernetesConfigMap(context.Background(), addr.Hex()); err != nil { log.Fatalf("Failed to update kubernetes config map: %v", err) - } else { - log.Printf("Updated config map %s", k8sConfigMapName) } + log.Printf("Updated config map %s", k8sConfigMapName) } return nil } diff --git a/validator/keymanager/imported/import.go b/validator/keymanager/imported/import.go index 9a78ebdd7d..45f88b6584 100644 --- a/validator/keymanager/imported/import.go +++ b/validator/keymanager/imported/import.go @@ -28,10 +28,8 @@ func (km *Keymanager) ImportKeystores( return nil, errors.New("no passwords provided for keystores") } else if len(passwords) == 1 { singlePasswordForAll = passwords[0] - } else { - if len(passwords) != len(keystores) { - return nil, errors.New("number of passwords does not match number of keystores") - } + } else if len(passwords) != len(keystores) { + return nil, errors.New("number of passwords does not match number of keystores") } decryptor := keystorev4.New() diff --git a/validator/rpc/wallet.go b/validator/rpc/wallet.go index 043a3c3203..b978b242ab 100644 --- a/validator/rpc/wallet.go +++ 
b/validator/rpc/wallet.go @@ -216,7 +216,7 @@ func (s *Server) RecoverWallet(ctx context.Context, req *pb.RecoverWalletRequest // we return an empty response with no error. If the password is incorrect for a single keystore, // we return an appropriate error. func (s *Server) ValidateKeystores( - ctx context.Context, req *pb.ValidateKeystoresRequest, + _ context.Context, req *pb.ValidateKeystoresRequest, ) (*emptypb.Empty, error) { if req.KeystoresPassword == "" { return nil, status.Error(codes.InvalidArgument, "Password required for keystores") diff --git a/validator/slashing-protection-history/import.go b/validator/slashing-protection-history/import.go index 8caa0f01a0..94e4764f4a 100644 --- a/validator/slashing-protection-history/import.go +++ b/validator/slashing-protection-history/import.go @@ -235,7 +235,7 @@ func parseAttestationsForUniquePublicKeys(data []*format.ProtectionData) (map[[4 return signedAttestationsByPubKey, nil } -func filterSlashablePubKeysFromBlocks(ctx context.Context, historyByPubKey map[[48]byte]kv.ProposalHistoryForPubkey) [][48]byte { +func filterSlashablePubKeysFromBlocks(_ context.Context, historyByPubKey map[[48]byte]kv.ProposalHistoryForPubkey) [][48]byte { // Given signing roots are optional in the EIP standard, we behave as follows: // For a given block: // If we have a previous block with the same slot in our history: @@ -311,7 +311,7 @@ func filterSlashablePubKeysFromAttestations( return slashablePubKeys, nil } -func transformSignedBlocks(ctx context.Context, signedBlocks []*format.SignedBlock) (*kv.ProposalHistoryForPubkey, error) { +func transformSignedBlocks(_ context.Context, signedBlocks []*format.SignedBlock) (*kv.ProposalHistoryForPubkey, error) { proposals := make([]kv.Proposal, len(signedBlocks)) for i, proposal := range signedBlocks { slot, err := SlotFromString(proposal.Slot) diff --git a/validator/testing/mock_slasher.go b/validator/testing/mock_slasher.go index 7944976508..f421afe6cc 100644 --- 
a/validator/testing/mock_slasher.go +++ b/validator/testing/mock_slasher.go @@ -18,7 +18,7 @@ type MockSlasher struct { } // HighestAttestations will return an empty array of attestations. -func (_ MockSlasher) HighestAttestations(ctx context.Context, req *eth.HighestAttestationRequest, _ ...grpc.CallOption) (*eth.HighestAttestationResponse, error) { +func (MockSlasher) HighestAttestations(ctx context.Context, req *eth.HighestAttestationRequest, _ ...grpc.CallOption) (*eth.HighestAttestationResponse, error) { return ð.HighestAttestationResponse{ Attestations: nil, }, nil From ab60b1c7b245f322516725baac1a5523b5fb9d64 Mon Sep 17 00:00:00 2001 From: Preston Van Loon Date: Wed, 1 Dec 2021 14:31:21 -0500 Subject: [PATCH 42/45] Update go-ethereum to v1.10.13 (#9967) Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com> --- deps.bzl | 22 ++++++++-------------- go.mod | 6 +++--- go.sum | 16 ++++++++-------- 3 files changed, 19 insertions(+), 25 deletions(-) diff --git a/deps.bzl b/deps.bzl index d72a58e3b6..2d7d77da81 100644 --- a/deps.bzl +++ b/deps.bzl @@ -825,8 +825,8 @@ def prysm_deps(): importpath = "github.com/ethereum/go-ethereum", patch_args = ["-p1"], patches = ["//third_party:com_github_ethereum_go_ethereum_secp256k1.patch"], - sum = "h1:Mi7op8Vnhq9L2jpczrDzPm6c9XZbvHu0h4hoDq9u7QM=", - version = "v1.10.11-0.20211018203420-b97f57882c14", + sum = "h1:DEYFP9zk+Gruf3ae1JOJVhNmxK28ee+sMELPLgYTXpA=", + version = "v1.10.13", ) go_repository( @@ -1656,12 +1656,6 @@ def prysm_deps(): version = "v0.4.2", ) - go_repository( - name = "com_github_ipfs_go_ipfs_addr", - importpath = "github.com/ipfs/go-ipfs-addr", - sum = "h1:DpDFybnho9v3/a1dzJ5KnWdThWD1HrFLpQ+tWIyBaFI=", - version = "v0.0.1", - ) go_repository( name = "com_github_ipfs_go_ipfs_delay", importpath = "github.com/ipfs/go-ipfs-delay", @@ -4197,8 +4191,8 @@ def prysm_deps(): go_repository( name = "org_golang_x_crypto", importpath = "golang.org/x/crypto", - sum = 
"h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg=", - version = "v0.0.0-20210921155107-089bfa567519", + sum = "h1:/pEO3GD/ABYAjuakUS6xSEmmlyVS4kxBNkeA9tLJiTI=", + version = "v0.0.0-20211117183948-ae814b36b871", ) go_repository( name = "org_golang_x_exp", @@ -4236,8 +4230,8 @@ def prysm_deps(): go_repository( name = "org_golang_x_net", importpath = "golang.org/x/net", - sum = "h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c=", - version = "v0.0.0-20210813160813-60bc85c4be6d", + sum = "h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE=", + version = "v0.0.0-20211112202133-69e39bad7dc2", ) go_repository( name = "org_golang_x_oauth2", @@ -4261,8 +4255,8 @@ def prysm_deps(): go_repository( name = "org_golang_x_sys", importpath = "golang.org/x/sys", - sum = "h1:KzbpndAYEM+4oHRp9JmB2ewj0NHHxO3Z0g7Gus2O1kk=", - version = "v0.0.0-20211015200801-69063c4bb744", + sum = "h1:TyHqChC80pFkXWraUUf6RuB5IqFdQieMLwwCJokV2pc=", + version = "v0.0.0-20211124211545-fe61309f8881", ) go_repository( name = "org_golang_x_term", diff --git a/go.mod b/go.mod index e4aa47b358..1bede80a3c 100644 --- a/go.mod +++ b/go.mod @@ -10,7 +10,7 @@ require ( github.com/dgraph-io/ristretto v0.0.4-0.20210318174700-74754f61e018 github.com/dustin/go-humanize v1.0.0 github.com/emicklei/dot v0.11.0 - github.com/ethereum/go-ethereum v1.10.11-0.20211018203420-b97f57882c14 + github.com/ethereum/go-ethereum v1.10.13 github.com/ferranbt/fastssz v0.0.0-20210905181407-59cf6761a7d5 github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 github.com/fsnotify/fsnotify v1.4.9 @@ -81,7 +81,7 @@ require ( go.etcd.io/bbolt v1.3.5 go.opencensus.io v0.23.0 go.uber.org/automaxprocs v1.3.0 - golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 + golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871 golang.org/x/exp v0.0.0-20200513190911-00229845015e golang.org/x/sync v0.0.0-20210220032951-036812b2e83c golang.org/x/tools v0.1.1 @@ -105,7 +105,7 @@ require ( github.com/go-ole/go-ole v1.2.5 // indirect github.com/peterh/liner v1.2.0 
// indirect github.com/prometheus/tsdb v0.10.0 // indirect - golang.org/x/sys v0.0.0-20211015200801-69063c4bb744 // indirect + golang.org/x/sys v0.0.0-20211124211545-fe61309f8881 // indirect google.golang.org/api v0.34.0 // indirect google.golang.org/appengine v1.6.7 // indirect k8s.io/apimachinery v0.18.3 diff --git a/go.sum b/go.sum index e538a07ce0..bfb3f5dcf9 100644 --- a/go.sum +++ b/go.sum @@ -269,8 +269,8 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk= github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ= github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c= -github.com/ethereum/go-ethereum v1.10.11-0.20211018203420-b97f57882c14 h1:Mi7op8Vnhq9L2jpczrDzPm6c9XZbvHu0h4hoDq9u7QM= -github.com/ethereum/go-ethereum v1.10.11-0.20211018203420-b97f57882c14/go.mod h1:W3yfrFyL9C1pHcwY5hmRHVDaorTiQxhYBkKyu5mEDHw= +github.com/ethereum/go-ethereum v1.10.13 h1:DEYFP9zk+Gruf3ae1JOJVhNmxK28ee+sMELPLgYTXpA= +github.com/ethereum/go-ethereum v1.10.13/go.mod h1:W3yfrFyL9C1pHcwY5hmRHVDaorTiQxhYBkKyu5mEDHw= github.com/evanphx/json-patch v4.2.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4= github.com/fatih/color v1.9.0 h1:8xPHl4/q1VyqGIPif1F+1V3Y3lSmrq01EabUW3CoW5s= @@ -1461,8 +1461,8 @@ golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWP golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4= golang.org/x/crypto v0.0.0-20210506145944-38f3c27a63bf/go.mod h1:P+XmwS30IXTQdn5tA2iutPOUgjI07+tq3H3K9MVA1s8= golang.org/x/crypto v0.0.0-20210813211128-0a44fdfbc16e/go.mod 
h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519 h1:7I4JAnoQBe7ZtJcBaYHi5UtiO8tQHbUSXxL+pnGRANg= -golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc= +golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871 h1:/pEO3GD/ABYAjuakUS6xSEmmlyVS4kxBNkeA9tLJiTI= +golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4= golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA= @@ -1564,8 +1564,9 @@ golang.org/x/net v0.0.0-20210423184538-5f58ad60dda6/go.mod h1:OJAsFXCWl8Ukc7SiCT golang.org/x/net v0.0.0-20210428140749-89ef3d95e781/go.mod h1:OJAsFXCWl8Ukc7SiCT/9KSuxbyM7479/AVlXFRxuMCk= golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= -golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d h1:LO7XpTYMwTqxjLcGWPijK3vRXg1aWdlNOVOHRq45d7c= golang.org/x/net v0.0.0-20210813160813-60bc85c4be6d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2 h1:CIJ76btIcR3eFI5EgSo6k1qKw9KJexJuRLI9G7Hp5wE= +golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y= golang.org/x/oauth2 v0.0.0-20170912212905-13449ad91cb2/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= golang.org/x/oauth2 v0.0.0-20181017192945-9dcd33a902f4/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= @@ -1682,8 +1683,8 @@ 
golang.org/x/sys v0.0.0-20210511113859-b0526f3d8744/go.mod h1:oPkhp1MJrh7nUepCBc golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= -golang.org/x/sys v0.0.0-20211015200801-69063c4bb744 h1:KzbpndAYEM+4oHRp9JmB2ewj0NHHxO3Z0g7Gus2O1kk= -golang.org/x/sys v0.0.0-20211015200801-69063c4bb744/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881 h1:TyHqChC80pFkXWraUUf6RuB5IqFdQieMLwwCJokV2pc= +golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1 h1:v+OssWQX+hTHEmOBgwxdZxK4zHq3yOs8F9J7mk0PY8E= golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo= @@ -1933,7 +1934,6 @@ gopkg.in/jcmturner/gokrb5.v7 v7.5.0/go.mod h1:l8VISx+WGYp+Fp7KRbsiUuXTTOnxIc3Tuv gopkg.in/jcmturner/rpc.v1 v1.1.0/go.mod h1:YIdkC4XfD6GXbzje11McwsDuOlZQSb9W4vfLvuNnlv8= gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU= gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c= -gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6 h1:a6cXbcDDUkSBlpnkWV1bJ+vv3mOgQEltEJ2rPxroVu0= gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns= gopkg.in/redis.v4 v4.2.4/go.mod h1:8KREHdypkCEojGKQcjMqAODMICIVwZAONWq8RowTITA= gopkg.in/resty.v1 v1.12.0/go.mod h1:mDo4pnntr5jdWRML875a/NmxYqAlA73dVijT2AXvQQo= From 
790bf031234f5bbd734fdb23f892e1041d293e4c Mon Sep 17 00:00:00 2001 From: Raul Jordan Date: Wed, 1 Dec 2021 18:34:53 -0500 Subject: [PATCH 43/45] Replace a Few IntFlags with Uint64Flags (#9959) * use uints instead of ints * fix method * fix * fix * builds * deepsource * deep source --- beacon-chain/node/node.go | 2 +- beacon-chain/p2p/config.go | 2 +- beacon-chain/p2p/connection_gater_test.go | 2 +- beacon-chain/p2p/interfaces.go | 2 +- .../p2p/peers/scorers/block_providers.go | 4 ++-- .../p2p/peers/scorers/block_providers_test.go | 10 +++++----- beacon-chain/p2p/peers/scorers/service_test.go | 4 ++-- beacon-chain/p2p/peers/status.go | 2 +- beacon-chain/p2p/subnets.go | 13 ++++++------- beacon-chain/p2p/testing/fuzz_p2p.go | 2 +- beacon-chain/p2p/testing/mock_peermanager.go | 2 +- beacon-chain/p2p/testing/p2p.go | 2 +- .../sync/initial-sync/blocks_fetcher_peers.go | 8 ++++---- .../initial-sync/blocks_fetcher_peers_test.go | 2 +- .../sync/initial-sync/blocks_fetcher_test.go | 12 ++++++------ .../initial-sync/blocks_fetcher_utils_test.go | 2 +- beacon-chain/sync/initial-sync/service.go | 4 ++-- beacon-chain/sync/initial-sync/service_test.go | 2 +- .../sync/rpc_beacon_blocks_by_range.go | 2 +- .../sync/rpc_beacon_blocks_by_range_test.go | 4 ++-- beacon-chain/sync/subscriber.go | 4 ++-- beacon-chain/sync/subscriber_test.go | 2 +- cmd/beacon-chain/flags/base.go | 6 +++--- cmd/beacon-chain/flags/config.go | 18 +++++++++--------- cmd/flags.go | 2 +- 25 files changed, 57 insertions(+), 58 deletions(-) diff --git a/beacon-chain/node/node.go b/beacon-chain/node/node.go index 2aa2f2b4c2..7e2ce1899b 100644 --- a/beacon-chain/node/node.go +++ b/beacon-chain/node/node.go @@ -489,7 +489,7 @@ func (b *BeaconNode) registerP2P(cliCtx *cli.Context) error { MetaDataDir: cliCtx.String(cmd.P2PMetadata.Name), TCPPort: cliCtx.Uint(cmd.P2PTCPPort.Name), UDPPort: cliCtx.Uint(cmd.P2PUDPPort.Name), - MaxPeers: cliCtx.Uint(cmd.P2PMaxPeers.Name), + MaxPeers: cliCtx.Uint64(cmd.P2PMaxPeers.Name), 
AllowListCIDR: cliCtx.String(cmd.P2PAllowList.Name), DenyListCIDR: slice.SplitCommaSeparated(cliCtx.StringSlice(cmd.P2PDenyList.Name)), EnableUPnP: cliCtx.Bool(cmd.EnableUPnPFlag.Name), diff --git a/beacon-chain/p2p/config.go b/beacon-chain/p2p/config.go index 6aa38576fd..8dc9824405 100644 --- a/beacon-chain/p2p/config.go +++ b/beacon-chain/p2p/config.go @@ -23,7 +23,7 @@ type Config struct { MetaDataDir string TCPPort uint UDPPort uint - MaxPeers uint + MaxPeers uint64 AllowListCIDR string DenyListCIDR []string StateNotifier statefeed.Notifier diff --git a/beacon-chain/p2p/connection_gater_test.go b/beacon-chain/p2p/connection_gater_test.go index 984f74ff8c..e36f697d8d 100644 --- a/beacon-chain/p2p/connection_gater_test.go +++ b/beacon-chain/p2p/connection_gater_test.go @@ -104,7 +104,7 @@ func TestService_RejectInboundPeersBeyondLimit(t *testing.T) { ScorerParams: &scorers.Config{}, }), host: mockp2p.NewTestP2P(t).BHost, - cfg: &Config{MaxPeers: uint(limit)}, + cfg: &Config{MaxPeers: uint64(limit)}, } var err error s.addrFilter, err = configureFilter(&Config{}) diff --git a/beacon-chain/p2p/interfaces.go b/beacon-chain/p2p/interfaces.go index 4dfa062a89..d1bf920918 100644 --- a/beacon-chain/p2p/interfaces.go +++ b/beacon-chain/p2p/interfaces.go @@ -77,7 +77,7 @@ type PeerManager interface { ENR() *enr.Record DiscoveryAddresses() ([]multiaddr.Multiaddr, error) RefreshENR() - FindPeersWithSubnet(ctx context.Context, topic string, subIndex uint64, threshold int) (bool, error) + FindPeersWithSubnet(ctx context.Context, topic string, subIndex uint64, threshold uint64) (bool, error) AddPingMethod(reqFunc func(ctx context.Context, id peer.ID) error) } diff --git a/beacon-chain/p2p/peers/scorers/block_providers.go b/beacon-chain/p2p/peers/scorers/block_providers.go index 77b362a1ce..34cfa48e9c 100644 --- a/beacon-chain/p2p/peers/scorers/block_providers.go +++ b/beacon-chain/p2p/peers/scorers/block_providers.go @@ -85,7 +85,7 @@ func newBlockProviderScorer(store 
*peerdata.Store, config *BlockProviderScorerCo if scorer.config.StalePeerRefreshInterval == 0 { scorer.config.StalePeerRefreshInterval = DefaultBlockProviderStalePeerRefreshInterval } - batchSize := uint64(flags.Get().BlockBatchLimit) + batchSize := flags.Get().BlockBatchLimit scorer.maxScore = 1.0 if batchSize > 0 { totalBatches := float64(scorer.config.ProcessedBlocksCap / batchSize) @@ -110,7 +110,7 @@ func (s *BlockProviderScorer) score(pid peer.ID) float64 { if !ok || time.Since(peerData.BlockProviderUpdated) >= s.config.StalePeerRefreshInterval { return s.maxScore } - batchSize := uint64(flags.Get().BlockBatchLimit) + batchSize := flags.Get().BlockBatchLimit if batchSize > 0 { processedBatches := float64(peerData.ProcessedBlocks / batchSize) score += processedBatches * s.config.ProcessedBatchWeight diff --git a/beacon-chain/p2p/peers/scorers/block_providers_test.go b/beacon-chain/p2p/peers/scorers/block_providers_test.go index 27ef73ffd7..16427e045e 100644 --- a/beacon-chain/p2p/peers/scorers/block_providers_test.go +++ b/beacon-chain/p2p/peers/scorers/block_providers_test.go @@ -21,7 +21,7 @@ func TestScorers_BlockProvider_Score(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - batchSize := uint64(flags.Get().BlockBatchLimit) + batchSize := flags.Get().BlockBatchLimit tests := []struct { name string update func(scorer *scorers.BlockProviderScorer) @@ -160,7 +160,7 @@ func TestScorers_BlockProvider_WeightSorted(t *testing.T) { }, }) scorer := peerStatuses.Scorers().BlockProviderScorer() - batchSize := uint64(flags.Get().BlockBatchLimit) + batchSize := flags.Get().BlockBatchLimit r := rand.NewDeterministicGenerator() reverse := func(pids []peer.ID) []peer.ID { @@ -214,7 +214,7 @@ func TestScorers_BlockProvider_WeightSorted(t *testing.T) { } func TestScorers_BlockProvider_Sorted(t *testing.T) { - batchSize := uint64(flags.Get().BlockBatchLimit) + batchSize := flags.Get().BlockBatchLimit tests := []struct { name string 
update func(s *scorers.BlockProviderScorer) @@ -309,7 +309,7 @@ func TestScorers_BlockProvider_Sorted(t *testing.T) { func TestScorers_BlockProvider_MaxScore(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - batchSize := uint64(flags.Get().BlockBatchLimit) + batchSize := flags.Get().BlockBatchLimit tests := []struct { name string @@ -347,7 +347,7 @@ func TestScorers_BlockProvider_MaxScore(t *testing.T) { func TestScorers_BlockProvider_FormatScorePretty(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - batchSize := uint64(flags.Get().BlockBatchLimit) + batchSize := flags.Get().BlockBatchLimit format := "[%0.1f%%, raw: %0.2f, blocks: %d/1280]" tests := []struct { diff --git a/beacon-chain/p2p/peers/scorers/service_test.go b/beacon-chain/p2p/peers/scorers/service_test.go index a55ef31de4..d18fc46bff 100644 --- a/beacon-chain/p2p/peers/scorers/service_test.go +++ b/beacon-chain/p2p/peers/scorers/service_test.go @@ -17,7 +17,7 @@ func TestScorers_Service_Init(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) defer cancel() - batchSize := uint64(flags.Get().BlockBatchLimit) + batchSize := flags.Get().BlockBatchLimit t.Run("default config", func(t *testing.T) { peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{ @@ -82,7 +82,7 @@ func TestScorers_Service_Score(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) defer cancel() - batchSize := uint64(flags.Get().BlockBatchLimit) + batchSize := flags.Get().BlockBatchLimit peerScores := func(s *scorers.Service, pids []peer.ID) map[string]float64 { scores := make(map[string]float64, len(pids)) diff --git a/beacon-chain/p2p/peers/status.go b/beacon-chain/p2p/peers/status.go index 391d0ecf48..da80ef630a 100644 --- a/beacon-chain/p2p/peers/status.go +++ b/beacon-chain/p2p/peers/status.go @@ -696,7 +696,7 @@ func (p *Status) BestFinalized(maxPeers int, ourFinalizedEpoch types.Epoch) (typ // 
BestNonFinalized returns the highest known epoch, higher than ours, // and is shared by at least minPeers. -func (p *Status) BestNonFinalized(minPeers int, ourHeadEpoch types.Epoch) (types.Epoch, []peer.ID) { +func (p *Status) BestNonFinalized(minPeers uint64, ourHeadEpoch types.Epoch) (types.Epoch, []peer.ID) { connected := p.Connected() epochVotes := make(map[types.Epoch]uint64) pidEpoch := make(map[peer.ID]types.Epoch, len(connected)) diff --git a/beacon-chain/p2p/subnets.go b/beacon-chain/p2p/subnets.go index 8c12e55f92..511b15431f 100644 --- a/beacon-chain/p2p/subnets.go +++ b/beacon-chain/p2p/subnets.go @@ -33,12 +33,11 @@ const syncLockerVal = 100 // subscribed to a particular subnet. Then we try to connect // with those peers. This method will block until the required amount of // peers are found, the method only exits in the event of context timeouts. -func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string, - index uint64, threshold int) (bool, error) { +func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string, subIndex, threshold uint64) (bool, error) { ctx, span := trace.StartSpan(ctx, "p2p.FindPeersWithSubnet") defer span.End() - span.AddAttributes(trace.Int64Attribute("index", int64(index))) + span.AddAttributes(trace.Int64Attribute("index", int64(subIndex))) if s.dv5Listener == nil { // return if discovery isn't set @@ -49,14 +48,14 @@ func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string, iterator := s.dv5Listener.RandomNodes() switch { case strings.Contains(topic, GossipAttestationMessage): - iterator = filterNodes(ctx, iterator, s.filterPeerForAttSubnet(index)) + iterator = filterNodes(ctx, iterator, s.filterPeerForAttSubnet(subIndex)) case strings.Contains(topic, GossipSyncCommitteeMessage): - iterator = filterNodes(ctx, iterator, s.filterPeerForSyncSubnet(index)) + iterator = filterNodes(ctx, iterator, s.filterPeerForSyncSubnet(subIndex)) default: return false, errors.New("no subnet exists for 
provided topic") } - currNum := len(s.pubsub.ListPeers(topic)) + currNum := uint64(len(s.pubsub.ListPeers(topic))) wg := new(sync.WaitGroup) for { if err := ctx.Err(); err != nil { @@ -81,7 +80,7 @@ func (s *Service) FindPeersWithSubnet(ctx context.Context, topic string, } // Wait for all dials to be completed. wg.Wait() - currNum = len(s.pubsub.ListPeers(topic)) + currNum = uint64(len(s.pubsub.ListPeers(topic))) } return true, nil } diff --git a/beacon-chain/p2p/testing/fuzz_p2p.go b/beacon-chain/p2p/testing/fuzz_p2p.go index 06e0c281bf..e24a466628 100644 --- a/beacon-chain/p2p/testing/fuzz_p2p.go +++ b/beacon-chain/p2p/testing/fuzz_p2p.go @@ -61,7 +61,7 @@ func (p *FakeP2P) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) { } // FindPeersWithSubnet mocks the p2p func. -func (p *FakeP2P) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) { +func (p *FakeP2P) FindPeersWithSubnet(_ context.Context, _ string, _, _ uint64) (bool, error) { return false, nil } diff --git a/beacon-chain/p2p/testing/mock_peermanager.go b/beacon-chain/p2p/testing/mock_peermanager.go index 7c4b7b7774..b94157c568 100644 --- a/beacon-chain/p2p/testing/mock_peermanager.go +++ b/beacon-chain/p2p/testing/mock_peermanager.go @@ -51,7 +51,7 @@ func (m MockPeerManager) DiscoveryAddresses() ([]multiaddr.Multiaddr, error) { func (m MockPeerManager) RefreshENR() {} // FindPeersWithSubnet . -func (m MockPeerManager) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) { +func (m MockPeerManager) FindPeersWithSubnet(_ context.Context, _ string, _, _ uint64) (bool, error) { return true, nil } diff --git a/beacon-chain/p2p/testing/p2p.go b/beacon-chain/p2p/testing/p2p.go index 5819ad655d..994fe76fcb 100644 --- a/beacon-chain/p2p/testing/p2p.go +++ b/beacon-chain/p2p/testing/p2p.go @@ -349,7 +349,7 @@ func (p *TestP2P) Peers() *peers.Status { } // FindPeersWithSubnet mocks the p2p func. 
-func (p *TestP2P) FindPeersWithSubnet(_ context.Context, _ string, _ uint64, _ int) (bool, error) { +func (p *TestP2P) FindPeersWithSubnet(_ context.Context, _ string, _, _ uint64) (bool, error) { return false, nil } diff --git a/beacon-chain/sync/initial-sync/blocks_fetcher_peers.go b/beacon-chain/sync/initial-sync/blocks_fetcher_peers.go index aafe44fdc1..7fdd66be75 100644 --- a/beacon-chain/sync/initial-sync/blocks_fetcher_peers.go +++ b/beacon-chain/sync/initial-sync/blocks_fetcher_peers.go @@ -63,7 +63,7 @@ func (f *blocksFetcher) selectFailOverPeer(excludedPID peer.ID, peers []peer.ID) // waitForMinimumPeers spins and waits up until enough peers are available. func (f *blocksFetcher) waitForMinimumPeers(ctx context.Context) ([]peer.ID, error) { - required := params.BeaconConfig().MaxPeersToSync + required := uint64(params.BeaconConfig().MaxPeersToSync) if flags.Get().MinimumSyncPeers < required { required = flags.Get().MinimumSyncPeers } @@ -79,7 +79,7 @@ func (f *blocksFetcher) waitForMinimumPeers(ctx context.Context) ([]peer.ID, err headEpoch := slots.ToEpoch(f.chain.HeadSlot()) _, peers = f.p2p.Peers().BestNonFinalized(flags.Get().MinimumSyncPeers, headEpoch) } - if len(peers) >= required { + if uint64(len(peers)) >= required { return peers, nil } log.WithFields(logrus.Fields{ @@ -123,14 +123,14 @@ func (f *blocksFetcher) filterPeers(ctx context.Context, peers []peer.ID, peersP // trimPeers limits peer list, returning only specified percentage of peers. // Takes system constraints into account (min/max peers to sync). func trimPeers(peers []peer.ID, peersPercentage float64) []peer.ID { - required := params.BeaconConfig().MaxPeersToSync + required := uint64(params.BeaconConfig().MaxPeersToSync) if flags.Get().MinimumSyncPeers < required { required = flags.Get().MinimumSyncPeers } // Weak/slow peers will be pushed down the list and trimmed since only percentage of peers is selected. 
limit := uint64(math.Round(float64(len(peers)) * peersPercentage)) // Limit cannot be less that minimum peers required by sync mechanism. - limit = mathutil.Max(limit, uint64(required)) + limit = mathutil.Max(limit, required) // Limit cannot be higher than number of peers available (safe-guard). limit = mathutil.Min(limit, uint64(len(peers))) return peers[:limit] diff --git a/beacon-chain/sync/initial-sync/blocks_fetcher_peers_test.go b/beacon-chain/sync/initial-sync/blocks_fetcher_peers_test.go index 1e1697707b..17542739fe 100644 --- a/beacon-chain/sync/initial-sync/blocks_fetcher_peers_test.go +++ b/beacon-chain/sync/initial-sync/blocks_fetcher_peers_test.go @@ -118,7 +118,7 @@ func TestBlocksFetcher_filterPeers(t *testing.T) { capacityWeight float64 } - batchSize := uint64(flags.Get().BlockBatchLimit) + batchSize := flags.Get().BlockBatchLimit tests := []struct { name string args args diff --git a/beacon-chain/sync/initial-sync/blocks_fetcher_test.go b/beacon-chain/sync/initial-sync/blocks_fetcher_test.go index ed382b5869..e2aa548187 100644 --- a/beacon-chain/sync/initial-sync/blocks_fetcher_test.go +++ b/beacon-chain/sync/initial-sync/blocks_fetcher_test.go @@ -372,7 +372,7 @@ func TestBlocksFetcher_RoundRobin(t *testing.T) { } func TestBlocksFetcher_scheduleRequest(t *testing.T) { - blockBatchLimit := uint64(flags.Get().BlockBatchLimit) + blockBatchLimit := flags.Get().BlockBatchLimit t.Run("context cancellation", func(t *testing.T) { ctx, cancel := context.WithCancel(context.Background()) fetcher := newBlocksFetcher(ctx, &blocksFetcherConfig{}) @@ -426,7 +426,7 @@ func TestBlocksFetcher_handleRequest(t *testing.T) { }) cancel() - response := fetcher.handleRequest(ctx, 1, uint64(blockBatchLimit)) + response := fetcher.handleRequest(ctx, 1, blockBatchLimit) assert.ErrorContains(t, "context canceled", response.err) }) @@ -441,7 +441,7 @@ func TestBlocksFetcher_handleRequest(t *testing.T) { requestCtx, reqCancel := context.WithTimeout(context.Background(), 
2*time.Second) defer reqCancel() go func() { - response := fetcher.handleRequest(requestCtx, 1 /* start */, uint64(blockBatchLimit) /* count */) + response := fetcher.handleRequest(requestCtx, 1 /* start */, blockBatchLimit /* count */) select { case <-ctx.Done(): case fetcher.fetchResponses <- response: @@ -459,7 +459,7 @@ func TestBlocksFetcher_handleRequest(t *testing.T) { blocks = resp.blocks } } - if uint64(len(blocks)) != uint64(blockBatchLimit) { + if uint64(len(blocks)) != blockBatchLimit { t.Errorf("incorrect number of blocks returned, expected: %v, got: %v", blockBatchLimit, len(blocks)) } @@ -510,11 +510,11 @@ func TestBlocksFetcher_requestBeaconBlocksByRange(t *testing.T) { req := &p2ppb.BeaconBlocksByRangeRequest{ StartSlot: 1, Step: 1, - Count: uint64(blockBatchLimit), + Count: blockBatchLimit, } blocks, err := fetcher.requestBlocks(ctx, req, peerIDs[0]) assert.NoError(t, err) - assert.Equal(t, uint64(blockBatchLimit), uint64(len(blocks)), "Incorrect number of blocks returned") + assert.Equal(t, blockBatchLimit, uint64(len(blocks)), "Incorrect number of blocks returned") // Test context cancellation. 
ctx, cancel = context.WithCancel(context.Background()) diff --git a/beacon-chain/sync/initial-sync/blocks_fetcher_utils_test.go b/beacon-chain/sync/initial-sync/blocks_fetcher_utils_test.go index 84ec9fc256..f5f8829e4d 100644 --- a/beacon-chain/sync/initial-sync/blocks_fetcher_utils_test.go +++ b/beacon-chain/sync/initial-sync/blocks_fetcher_utils_test.go @@ -201,7 +201,7 @@ func TestBlocksFetcher_findFork(t *testing.T) { peers = append(peers, connectPeerHavingBlocks(t, p2p, chain1, finalizedSlot, p2p.Peers())) } - blockBatchLimit := uint64(flags.Get().BlockBatchLimit) * 2 + blockBatchLimit := flags.Get().BlockBatchLimit * 2 pidInd := 0 for i := uint64(1); i < uint64(len(chain1)); i += blockBatchLimit { req := &p2ppb.BeaconBlocksByRangeRequest{ diff --git a/beacon-chain/sync/initial-sync/service.go b/beacon-chain/sync/initial-sync/service.go index 1f6d6407d6..a417848e43 100644 --- a/beacon-chain/sync/initial-sync/service.go +++ b/beacon-chain/sync/initial-sync/service.go @@ -163,13 +163,13 @@ func (s *Service) Resync() error { } func (s *Service) waitForMinimumPeers() { - required := params.BeaconConfig().MaxPeersToSync + required := uint64(params.BeaconConfig().MaxPeersToSync) if flags.Get().MinimumSyncPeers < required { required = flags.Get().MinimumSyncPeers } for { _, peers := s.cfg.P2P.Peers().BestNonFinalized(flags.Get().MinimumSyncPeers, s.cfg.Chain.FinalizedCheckpt().Epoch) - if len(peers) >= required { + if uint64(len(peers)) >= required { break } log.WithFields(logrus.Fields{ diff --git a/beacon-chain/sync/initial-sync/service_test.go b/beacon-chain/sync/initial-sync/service_test.go index 60617a52ec..39c7593aed 100644 --- a/beacon-chain/sync/initial-sync/service_test.go +++ b/beacon-chain/sync/initial-sync/service_test.go @@ -27,7 +27,7 @@ import ( ) func TestService_Constants(t *testing.T) { - if params.BeaconConfig().MaxPeersToSync*flags.Get().BlockBatchLimit > 1000 { + if uint64(params.BeaconConfig().MaxPeersToSync)*flags.Get().BlockBatchLimit > 
uint64(1000) { t.Fatal("rpc rejects requests over 1000 range slots") } } diff --git a/beacon-chain/sync/rpc_beacon_blocks_by_range.go b/beacon-chain/sync/rpc_beacon_blocks_by_range.go index 830d9e5828..f7ee288274 100644 --- a/beacon-chain/sync/rpc_beacon_blocks_by_range.go +++ b/beacon-chain/sync/rpc_beacon_blocks_by_range.go @@ -43,7 +43,7 @@ func (s *Service) beaconBlocksByRangeRPCHandler(ctx context.Context, msg interfa // The initial count for the first batch to be returned back. count := m.Count - allowedBlocksPerSecond := uint64(flags.Get().BlockBatchLimit) + allowedBlocksPerSecond := flags.Get().BlockBatchLimit if count > allowedBlocksPerSecond { count = allowedBlocksPerSecond } diff --git a/beacon-chain/sync/rpc_beacon_blocks_by_range_test.go b/beacon-chain/sync/rpc_beacon_blocks_by_range_test.go index 1e83bf3f18..33a1b87108 100644 --- a/beacon-chain/sync/rpc_beacon_blocks_by_range_test.go +++ b/beacon-chain/sync/rpc_beacon_blocks_by_range_test.go @@ -394,11 +394,11 @@ func TestRPCBeaconBlocksByRange_RPCHandlerRateLimitOverflow(t *testing.T) { req := &pb.BeaconBlocksByRangeRequest{ StartSlot: 100, Step: 1, - Count: uint64(flags.Get().BlockBatchLimit), + Count: flags.Get().BlockBatchLimit, } saveBlocks(req) - for i := 0; i < flags.Get().BlockBatchLimitBurstFactor; i++ { + for i := uint64(0); i < flags.Get().BlockBatchLimitBurstFactor; i++ { assert.NoError(t, sendRequest(p1, p2, r, req, true, false)) } diff --git a/beacon-chain/sync/subscriber.go b/beacon-chain/sync/subscriber.go index 5cb4f831e0..c05b845e09 100644 --- a/beacon-chain/sync/subscriber.go +++ b/beacon-chain/sync/subscriber.go @@ -641,7 +641,7 @@ func (s *Service) unSubscribeFromTopic(topic string) { // find if we have peers who are subscribed to the same subnet func (s *Service) validPeersExist(subnetTopic string) bool { numOfPeers := s.cfg.p2p.PubSub().ListPeers(subnetTopic + s.cfg.p2p.Encoding().ProtocolSuffix()) - return len(numOfPeers) >= flags.Get().MinimumPeersPerSubnet + return 
uint64(len(numOfPeers)) >= flags.Get().MinimumPeersPerSubnet } func (s *Service) retrievePersistentSubs(currSlot types.Slot) []uint64 { @@ -682,7 +682,7 @@ func (s *Service) filterNeededPeers(pids []peer.ID) []peer.ID { for _, sub := range wantedSubs { subnetTopic := fmt.Sprintf(topic, digest, sub) + s.cfg.p2p.Encoding().ProtocolSuffix() peers := s.cfg.p2p.PubSub().ListPeers(subnetTopic) - if len(peers) > flags.Get().MinimumPeersPerSubnet { + if uint64(len(peers)) > flags.Get().MinimumPeersPerSubnet { // In the event we have more than the minimum, we can // mark the remaining as viable for pruning. peers = peers[:flags.Get().MinimumPeersPerSubnet] diff --git a/beacon-chain/sync/subscriber_test.go b/beacon-chain/sync/subscriber_test.go index 4798adebe1..be48b7d35e 100644 --- a/beacon-chain/sync/subscriber_test.go +++ b/beacon-chain/sync/subscriber_test.go @@ -489,7 +489,7 @@ func TestFilterSubnetPeers(t *testing.T) { // Try with only peers from subnet 20. wantedPeers = []peer.ID{p2.BHost.ID()} // Connect an excess amount of peers in the particular subnet. - for i := 1; i <= flags.Get().MinimumPeersPerSubnet; i++ { + for i := uint64(1); i <= flags.Get().MinimumPeersPerSubnet; i++ { nPeer := createPeer(t, subnet20) p.Connect(nPeer) wantedPeers = append(wantedPeers, nPeer.BHost.ID()) diff --git a/cmd/beacon-chain/flags/base.go b/cmd/beacon-chain/flags/base.go index dce19df517..6a4f50d574 100644 --- a/cmd/beacon-chain/flags/base.go +++ b/cmd/beacon-chain/flags/base.go @@ -88,7 +88,7 @@ var ( } // MinSyncPeers specifies the required number of successful peer handshakes in order // to start syncing with external peers. - MinSyncPeers = &cli.IntFlag{ + MinSyncPeers = &cli.Uint64Flag{ Name: "min-sync-peers", Usage: "The required number of valid peers to connect with before syncing.", Value: 3, @@ -123,13 +123,13 @@ var ( Usage: "Does not run the discoveryV5 dht.", } // BlockBatchLimit specifies the requested block batch size. 
- BlockBatchLimit = &cli.IntFlag{ + BlockBatchLimit = &cli.Uint64Flag{ Name: "block-batch-limit", Usage: "The amount of blocks the local peer is bounded to request and respond to in a batch.", Value: 64, } // BlockBatchLimitBurstFactor specifies the factor by which block batch size may increase. - BlockBatchLimitBurstFactor = &cli.IntFlag{ + BlockBatchLimitBurstFactor = &cli.Uint64Flag{ Name: "block-batch-limit-burst-factor", Usage: "The factor by which block batch limit may increase on burst.", Value: 10, diff --git a/cmd/beacon-chain/flags/config.go b/cmd/beacon-chain/flags/config.go index fd35ab416e..74230e9533 100644 --- a/cmd/beacon-chain/flags/config.go +++ b/cmd/beacon-chain/flags/config.go @@ -12,10 +12,10 @@ type GlobalFlags struct { DisableSync bool DisableDiscv5 bool SubscribeToAllSubnets bool - MinimumSyncPeers int - MinimumPeersPerSubnet int - BlockBatchLimit int - BlockBatchLimitBurstFactor int + MinimumSyncPeers uint64 + MinimumPeersPerSubnet uint64 + BlockBatchLimit uint64 + BlockBatchLimitBurstFactor uint64 } var globalConfig *GlobalFlags @@ -50,17 +50,17 @@ func ConfigureGlobalFlags(ctx *cli.Context) { cfg.SubscribeToAllSubnets = true } cfg.DisableDiscv5 = ctx.Bool(DisableDiscv5.Name) - cfg.BlockBatchLimit = ctx.Int(BlockBatchLimit.Name) - cfg.BlockBatchLimitBurstFactor = ctx.Int(BlockBatchLimitBurstFactor.Name) - cfg.MinimumPeersPerSubnet = ctx.Int(MinPeersPerSubnet.Name) + cfg.BlockBatchLimit = ctx.Uint64(BlockBatchLimit.Name) + cfg.BlockBatchLimitBurstFactor = ctx.Uint64(BlockBatchLimitBurstFactor.Name) + cfg.MinimumPeersPerSubnet = ctx.Uint64(MinPeersPerSubnet.Name) configureMinimumPeers(ctx, cfg) Init(cfg) } func configureMinimumPeers(ctx *cli.Context, cfg *GlobalFlags) { - cfg.MinimumSyncPeers = ctx.Int(MinSyncPeers.Name) - maxPeers := ctx.Int(cmd.P2PMaxPeers.Name) + cfg.MinimumSyncPeers = ctx.Uint64(MinSyncPeers.Name) + maxPeers := ctx.Uint64(cmd.P2PMaxPeers.Name) if cfg.MinimumSyncPeers > maxPeers { log.Warnf("Changing Minimum Sync Peers 
to %d", maxPeers) cfg.MinimumSyncPeers = maxPeers diff --git a/cmd/flags.go b/cmd/flags.go index 1d8e34819e..eb408b5ba9 100644 --- a/cmd/flags.go +++ b/cmd/flags.go @@ -150,7 +150,7 @@ var ( Value: "", } // P2PMaxPeers defines a flag to specify the max number of peers in libp2p. - P2PMaxPeers = &cli.IntFlag{ + P2PMaxPeers = &cli.Uint64Flag{ Name: "p2p-max-peers", Usage: "The max number of p2p peers to maintain.", Value: 45, From 1d216a87378318991dab2682cf4320c81daa50a7 Mon Sep 17 00:00:00 2001 From: Raul Jordan Date: Wed, 1 Dec 2021 22:32:34 -0500 Subject: [PATCH 44/45] Filter Errored Keys from Returned Slashing Protection History in Standard API (#9968) * add err condition * naming --- validator/rpc/standard_api.go | 10 ++++++++-- validator/slashing-protection-history/export.go | 10 +++++----- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/validator/rpc/standard_api.go b/validator/rpc/standard_api.go index 10baf63609..0af7ce630c 100644 --- a/validator/rpc/standard_api.go +++ b/validator/rpc/standard_api.go @@ -90,8 +90,14 @@ func (s *Server) DeleteKeystores( if err != nil { return nil, status.Errorf(codes.Internal, "Could not delete keys: %v", err) } - keysToFilter := req.PublicKeys - exportedHistory, err := slashingprotection.ExportStandardProtectionJSON(ctx, s.valDB, keysToFilter...) + // We select keys that were deleted for retrieving slashing protection history. + filteredKeys := make([][]byte, 0, len(req.PublicKeys)) + for i, st := range statuses { + if st.Status != ethpbservice.DeletedKeystoreStatus_ERROR { + filteredKeys = append(filteredKeys, req.PublicKeys[i]) + } + } + exportedHistory, err := slashingprotection.ExportStandardProtectionJSON(ctx, s.valDB, filteredKeys...) 
if err != nil { return nil, status.Errorf( codes.Internal, diff --git a/validator/slashing-protection-history/export.go b/validator/slashing-protection-history/export.go index 37f6a577b5..a90af606b4 100644 --- a/validator/slashing-protection-history/export.go +++ b/validator/slashing-protection-history/export.go @@ -19,10 +19,10 @@ import ( func ExportStandardProtectionJSON( ctx context.Context, validatorDB db.Database, - keysToFilter ...[]byte, + filteredKeys ...[]byte, ) (*format.EIPSlashingProtectionFormat, error) { - keysFilterMap := make(map[string]bool, len(keysToFilter)) - for _, k := range keysToFilter { + keysFilterMap := make(map[string]bool, len(filteredKeys)) + for _, k := range filteredKeys { keysFilterMap[string(k)] = true } interchangeJSON := &format.EIPSlashingProtectionFormat{} @@ -58,7 +58,7 @@ func ExportStandardProtectionJSON( len(proposedPublicKeys), "Extracting signed blocks by validator public key", ) for _, pubKey := range proposedPublicKeys { - if _, ok := keysFilterMap[string(pubKey[:])]; len(keysToFilter) > 0 && !ok { + if _, ok := keysFilterMap[string(pubKey[:])]; len(filteredKeys) > 0 && !ok { continue } pubKeyHex, err := pubKeyToHexString(pubKey[:]) @@ -84,7 +84,7 @@ func ExportStandardProtectionJSON( len(attestedPublicKeys), "Extracting signed attestations by validator public key", ) for _, pubKey := range attestedPublicKeys { - if _, ok := keysFilterMap[string(pubKey[:])]; len(keysToFilter) > 0 && !ok { + if _, ok := keysFilterMap[string(pubKey[:])]; len(filteredKeys) > 0 && !ok { continue } pubKeyHex, err := pubKeyToHexString(pubKey[:]) From d3c97da4e173132f303f1d2efba3564f5e5296d1 Mon Sep 17 00:00:00 2001 From: Raul Jordan Date: Thu, 2 Dec 2021 09:58:49 -0500 Subject: [PATCH 45/45] Ensure Slashing Protection Exports and Keymanager API Work According to Spec (#9938) * password compliance * delete keys tests * changes to slashing protection exports * export tests pass * fix up failures * gaz * table driven tests for delete keystores 
* comment * rem deletion logic * look ma, no db * fix up tests * ineff * gaz * broken test fix * Update validator/keymanager/imported/delete.go * rem --- validator/accounts/accounts_import.go | 6 +- validator/accounts/accounts_list_test.go | 4 +- validator/keymanager/imported/delete.go | 1 + validator/keymanager/imported/delete_test.go | 4 +- validator/keymanager/imported/import.go | 15 +- validator/keymanager/imported/import_test.go | 6 +- .../keymanager/imported/keymanager_test.go | 4 +- validator/rpc/BUILD.bazel | 2 + validator/rpc/standard_api.go | 90 ++++++++- validator/rpc/standard_api_test.go | 178 ++++++++++++------ .../slashing-protection-history/export.go | 14 +- .../round_trip_test.go | 11 ++ 12 files changed, 249 insertions(+), 86 deletions(-) diff --git a/validator/accounts/accounts_import.go b/validator/accounts/accounts_import.go index ad98b8defe..c7f96cedb9 100644 --- a/validator/accounts/accounts_import.go +++ b/validator/accounts/accounts_import.go @@ -239,10 +239,14 @@ func ImportAccountsCli(cliCtx *cli.Context) error { // ImportAccounts can import external, EIP-2335 compliant keystore.json files as // new accounts into the Prysm validator wallet. 
func ImportAccounts(ctx context.Context, cfg *ImportAccountsConfig) ([]*ethpbservice.ImportedKeystoreStatus, error) { + passwords := make([]string, len(cfg.Keystores)) + for i := 0; i < len(cfg.Keystores); i++ { + passwords[i] = cfg.AccountPassword + } return cfg.Importer.ImportKeystores( ctx, cfg.Keystores, - []string{cfg.AccountPassword}, + passwords, ) } diff --git a/validator/accounts/accounts_list_test.go b/validator/accounts/accounts_list_test.go index 0989137f4a..6904b77f66 100644 --- a/validator/accounts/accounts_list_test.go +++ b/validator/accounts/accounts_list_test.go @@ -93,10 +93,12 @@ func TestListAccounts_ImportedKeymanager(t *testing.T) { numAccounts := 5 keystores := make([]*keymanager.Keystore, numAccounts) + passwords := make([]string, numAccounts) for i := 0; i < numAccounts; i++ { keystores[i] = createRandomKeystore(t, password) + passwords[i] = password } - _, err = km.ImportKeystores(cliCtx.Context, keystores, []string{password}) + _, err = km.ImportKeystores(cliCtx.Context, keystores, passwords) require.NoError(t, err) rescueStdout := os.Stdout diff --git a/validator/keymanager/imported/delete.go b/validator/keymanager/imported/delete.go index e50f81a2e0..2f586e61a4 100644 --- a/validator/keymanager/imported/delete.go +++ b/validator/keymanager/imported/delete.go @@ -73,6 +73,7 @@ func (km *Keymanager) DeleteKeystores( deletedKeysStr += fmt.Sprintf(",%#x", bytesutil.Trunc(k)) } } + log.WithFields(logrus.Fields{ "publicKeys": deletedKeysStr, }).Info("Successfully deleted validator key(s)") diff --git a/validator/keymanager/imported/delete_test.go b/validator/keymanager/imported/delete_test.go index 2445e3d561..457dc260b8 100644 --- a/validator/keymanager/imported/delete_test.go +++ b/validator/keymanager/imported/delete_test.go @@ -29,10 +29,12 @@ func TestImportedKeymanager_DeleteKeystores(t *testing.T) { numAccounts := 5 ctx := context.Background() keystores := make([]*keymanager.Keystore, numAccounts) + passwords := make([]string, 
numAccounts) for i := 0; i < numAccounts; i++ { keystores[i] = createRandomKeystore(t, password) + passwords[i] = password } - _, err := dr.ImportKeystores(ctx, keystores, []string{password}) + _, err := dr.ImportKeystores(ctx, keystores, passwords) require.NoError(t, err) accounts, err := dr.FetchValidatingPublicKeys(ctx) require.NoError(t, err) diff --git a/validator/keymanager/imported/import.go b/validator/keymanager/imported/import.go index 45f88b6584..eb6f659de5 100644 --- a/validator/keymanager/imported/import.go +++ b/validator/keymanager/imported/import.go @@ -22,13 +22,10 @@ func (km *Keymanager) ImportKeystores( keystores []*keymanager.Keystore, passwords []string, ) ([]*ethpbservice.ImportedKeystoreStatus, error) { - - var singlePasswordForAll string if len(passwords) == 0 { return nil, errors.New("no passwords provided for keystores") - } else if len(passwords) == 1 { - singlePasswordForAll = passwords[0] - } else if len(passwords) != len(keystores) { + } + if len(passwords) != len(keystores) { return nil, errors.New("number of passwords does not match number of keystores") } @@ -41,13 +38,7 @@ func (km *Keymanager) ImportKeystores( for i := 0; i < len(keystores); i++ { var privKeyBytes []byte var pubKeyBytes []byte - var passwordToUse string - if singlePasswordForAll != "" { - passwordToUse = singlePasswordForAll - } else { - passwordToUse = passwords[i] - } - privKeyBytes, pubKeyBytes, _, err = km.attemptDecryptKeystore(decryptor, keystores[i], passwordToUse) + privKeyBytes, pubKeyBytes, _, err = km.attemptDecryptKeystore(decryptor, keystores[i], passwords[i]) if err != nil { statuses[i] = ðpbservice.ImportedKeystoreStatus{ Status: ethpbservice.ImportedKeystoreStatus_ERROR, diff --git a/validator/keymanager/imported/import_test.go b/validator/keymanager/imported/import_test.go index 909d6e5100..8267f4b86f 100644 --- a/validator/keymanager/imported/import_test.go +++ b/validator/keymanager/imported/import_test.go @@ -117,16 +117,18 @@ func 
TestImportedKeymanager_ImportKeystores(t *testing.T) { ) require.ErrorContains(t, "number of passwords does not match", err) }) - t.Run("single password used to decrypt all keystores", func(t *testing.T) { + t.Run("same password used to decrypt all keystores", func(t *testing.T) { numKeystores := 5 keystores := make([]*keymanager.Keystore, numKeystores) + passwords := make([]string, numKeystores) for i := 0; i < numKeystores; i++ { keystores[i] = createRandomKeystore(t, password) + passwords[i] = password } statuses, err := dr.ImportKeystores( ctx, keystores, - []string{password}, + passwords, ) require.NoError(t, err) require.Equal(t, numKeystores, len(statuses)) diff --git a/validator/keymanager/imported/keymanager_test.go b/validator/keymanager/imported/keymanager_test.go index 40ea0c16de..14b9cc81b9 100644 --- a/validator/keymanager/imported/keymanager_test.go +++ b/validator/keymanager/imported/keymanager_test.go @@ -96,10 +96,12 @@ func TestImportedKeymanager_Sign(t *testing.T) { ctx := context.Background() numAccounts := 10 keystores := make([]*keymanager.Keystore, numAccounts) + passwords := make([]string, numAccounts) for i := 0; i < numAccounts; i++ { keystores[i] = createRandomKeystore(t, password) + passwords[i] = password } - _, err := dr.ImportKeystores(ctx, keystores, []string{password}) + _, err := dr.ImportKeystores(ctx, keystores, passwords) require.NoError(t, err) var encodedKeystore []byte diff --git a/validator/rpc/BUILD.bazel b/validator/rpc/BUILD.bazel index f6b202a468..b8183f9534 100644 --- a/validator/rpc/BUILD.bazel +++ b/validator/rpc/BUILD.bazel @@ -27,6 +27,7 @@ go_library( "//config/features:go_default_library", "//crypto/bls:go_default_library", "//crypto/rand:go_default_library", + "//encoding/bytesutil:go_default_library", "//io/file:go_default_library", "//io/logs:go_default_library", "//io/prompt:go_default_library", @@ -45,6 +46,7 @@ go_library( "//validator/keymanager/derived:go_default_library", 
"//validator/keymanager/imported:go_default_library", "//validator/slashing-protection-history:go_default_library", + "//validator/slashing-protection-history/format:go_default_library", "@com_github_fsnotify_fsnotify//:go_default_library", "@com_github_golang_jwt_jwt//:go_default_library", "@com_github_grpc_ecosystem_go_grpc_middleware//:go_default_library", diff --git a/validator/rpc/standard_api.go b/validator/rpc/standard_api.go index 0af7ce630c..2df3feefe1 100644 --- a/validator/rpc/standard_api.go +++ b/validator/rpc/standard_api.go @@ -7,10 +7,12 @@ import ( "fmt" "github.com/golang/protobuf/ptypes/empty" + "github.com/prysmaticlabs/prysm/encoding/bytesutil" ethpbservice "github.com/prysmaticlabs/prysm/proto/eth/service" "github.com/prysmaticlabs/prysm/validator/keymanager" "github.com/prysmaticlabs/prysm/validator/keymanager/derived" slashingprotection "github.com/prysmaticlabs/prysm/validator/slashing-protection-history" + "github.com/prysmaticlabs/prysm/validator/slashing-protection-history/format" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) @@ -51,6 +53,12 @@ func (s *Server) ImportKeystores( if !ok { return nil, status.Error(codes.Internal, "Keymanager kind cannot import keys") } + if len(req.Passwords) == 0 { + return nil, status.Error(codes.Internal, "No passwords provided for keystores") + } + if len(req.Passwords) != len(req.Keystores) { + return nil, status.Error(codes.Internal, "Number of passwords does not match number of keystores") + } keystores := make([]*keymanager.Keystore, len(req.Keystores)) for i := 0; i < len(req.Keystores); i++ { k := &keymanager.Keystore{} @@ -72,6 +80,9 @@ func (s *Server) ImportKeystores( if err != nil { return nil, status.Errorf(codes.Internal, "Could not import keystores: %v", err) } + + // If any of the keys imported had a slashing protection history before, we + // stop marking them as deleted from our validator database. 
return ðpbservice.ImportKeystoresResponse{Statuses: statuses}, nil } @@ -90,14 +101,21 @@ func (s *Server) DeleteKeystores( if err != nil { return nil, status.Errorf(codes.Internal, "Could not delete keys: %v", err) } - // We select keys that were deleted for retrieving slashing protection history. - filteredKeys := make([][]byte, 0, len(req.PublicKeys)) - for i, st := range statuses { - if st.Status != ethpbservice.DeletedKeystoreStatus_ERROR { - filteredKeys = append(filteredKeys, req.PublicKeys[i]) - } + if len(statuses) != len(req.PublicKeys) { + return nil, status.Errorf( + codes.Internal, + "Wanted same amount of statuses %d as public keys %d", + len(statuses), + len(req.PublicKeys), + ) } - exportedHistory, err := slashingprotection.ExportStandardProtectionJSON(ctx, s.valDB, filteredKeys...) + + statuses, err = s.transformDeletedKeysStatuses(ctx, req.PublicKeys, statuses) + if err != nil { + return nil, status.Errorf(codes.Internal, "Could not transform deleted keys statuses: %v", err) + } + + exportedHistory, err := s.slashingProtectionHistoryForDeletedKeys(ctx, req.PublicKeys, statuses) if err != nil { return nil, status.Errorf( codes.Internal, @@ -109,7 +127,7 @@ func (s *Server) DeleteKeystores( if err != nil { return nil, status.Errorf( codes.Internal, - "Could not export slashing protection history: %v", + "Could not JSON marshal slashing protection history: %v", err, ) } @@ -118,3 +136,59 @@ func (s *Server) DeleteKeystores( SlashingProtection: string(jsonHist), }, nil } + +// For a list of deleted keystore statuses, we check if any NOT_FOUND status actually +// has a corresponding public key in the database. In this case, we transform the status +// to NOT_ACTIVE, as we do have slashing protection history for it and should not mark it +// as NOT_FOUND when returning a response to the caller. 
+func (s *Server) transformDeletedKeysStatuses( + ctx context.Context, pubKeys [][]byte, statuses []*ethpbservice.DeletedKeystoreStatus, +) ([]*ethpbservice.DeletedKeystoreStatus, error) { + pubKeysInDB, err := s.publicKeysInDB(ctx) + if err != nil { + return nil, status.Errorf(codes.Internal, "Could not get public keys from DB: %v", err) + } + if len(pubKeysInDB) > 0 { + for i := 0; i < len(pubKeys); i++ { + keyExistsInDB := pubKeysInDB[bytesutil.ToBytes48(pubKeys[i])] + if keyExistsInDB && statuses[i].Status == ethpbservice.DeletedKeystoreStatus_NOT_FOUND { + statuses[i].Status = ethpbservice.DeletedKeystoreStatus_NOT_ACTIVE + } + } + } + return statuses, nil +} + +// Gets a map of all public keys in the database, useful for O(1) lookups. +func (s *Server) publicKeysInDB(ctx context.Context) (map[[48]byte]bool, error) { + pubKeysInDB := make(map[[48]byte]bool) + attestedPublicKeys, err := s.valDB.AttestedPublicKeys(ctx) + if err != nil { + return nil, fmt.Errorf("could not get attested public keys from DB: %v", err) + } + proposedPublicKeys, err := s.valDB.ProposedPublicKeys(ctx) + if err != nil { + return nil, fmt.Errorf("could not get proposed public keys from DB: %v", err) + } + for _, pk := range append(attestedPublicKeys, proposedPublicKeys...) { + pubKeysInDB[pk] = true + } + return pubKeysInDB, nil +} + +// Exports slashing protection data for a list of DELETED or NOT_ACTIVE keys only to be used +// as part of the DeleteKeystores endpoint. +func (s *Server) slashingProtectionHistoryForDeletedKeys( + ctx context.Context, pubKeys [][]byte, statuses []*ethpbservice.DeletedKeystoreStatus, +) (*format.EIPSlashingProtectionFormat, error) { + // We select the keys that were DELETED or NOT_ACTIVE from the previous action + // and use that to filter our slashing protection export. 
+ filteredKeys := make([][]byte, 0, len(pubKeys)) + for i, pk := range pubKeys { + if statuses[i].Status == ethpbservice.DeletedKeystoreStatus_DELETED || + statuses[i].Status == ethpbservice.DeletedKeystoreStatus_NOT_ACTIVE { + filteredKeys = append(filteredKeys, pk) + } + } + return slashingprotection.ExportStandardProtectionJSON(ctx, s.valDB, filteredKeys...) +} diff --git a/validator/rpc/standard_api_test.go b/validator/rpc/standard_api_test.go index 53e3f1854d..603c69af9c 100644 --- a/validator/rpc/standard_api_test.go +++ b/validator/rpc/standard_api_test.go @@ -20,6 +20,7 @@ import ( "github.com/prysmaticlabs/prysm/validator/db/kv" "github.com/prysmaticlabs/prysm/validator/keymanager" "github.com/prysmaticlabs/prysm/validator/keymanager/derived" + "github.com/prysmaticlabs/prysm/validator/slashing-protection-history/format" mocks "github.com/prysmaticlabs/prysm/validator/testing" keystorev4 "github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4" ) @@ -115,6 +116,20 @@ func TestServer_ImportKeystores(t *testing.T) { }) require.NotNil(t, err) }) + t.Run("error if no passwords in request", func(t *testing.T) { + _, err := s.ImportKeystores(context.Background(), ðpbservice.ImportKeystoresRequest{ + Keystores: []string{"hi"}, + Passwords: []string{}, + }) + require.ErrorContains(t, "No passwords provided", err) + }) + t.Run("error if number of passwords does not match number of keystores", func(t *testing.T) { + _, err := s.ImportKeystores(context.Background(), ðpbservice.ImportKeystoresRequest{ + Keystores: []string{"hi"}, + Passwords: []string{"hi", "hi"}, + }) + require.ErrorContains(t, "Number of passwords does not match", err) + }) t.Run("prevents importing if faulty slashing protection data", func(t *testing.T) { numKeystores := 5 password := "12345678" @@ -135,12 +150,14 @@ func TestServer_ImportKeystores(t *testing.T) { numKeystores := 5 password := "12345678" keystores := make([]*keymanager.Keystore, numKeystores) + passwords := make([]string, 
numKeystores) publicKeys := make([][48]byte, numKeystores) for i := 0; i < numKeystores; i++ { keystores[i] = createRandomKeystore(t, password) pubKey, err := hex.DecodeString(keystores[i].Pubkey) require.NoError(t, err) publicKeys[i] = bytesutil.ToBytes48(pubKey) + passwords[i] = password } // Create a validator database. @@ -176,7 +193,7 @@ func TestServer_ImportKeystores(t *testing.T) { resp, err := s.ImportKeystores(context.Background(), ðpbservice.ImportKeystoresRequest{ Keystores: encodedKeystores, - Passwords: []string{password}, + Passwords: passwords, SlashingProtection: string(encodedSlashingProtection), }) require.NoError(t, err) @@ -189,46 +206,23 @@ func TestServer_ImportKeystores(t *testing.T) { func TestServer_DeleteKeystores(t *testing.T) { ctx := context.Background() - t.Run("wallet not ready", func(t *testing.T) { - s := Server{} - _, err := s.DeleteKeystores(context.Background(), nil) - require.ErrorContains(t, "Wallet not ready", err) - }) - localWalletDir := setupWalletDir(t) - defaultWalletPath = localWalletDir - w, err := accounts.CreateWalletWithKeymanager(ctx, &accounts.CreateWalletConfig{ - WalletCfg: &wallet.Config{ - WalletDir: defaultWalletPath, - KeymanagerKind: keymanager.Derived, - WalletPassword: strongPass, - }, - SkipMnemonicConfirm: true, - }) - require.NoError(t, err) - km, err := w.InitializeKeymanager(ctx, iface.InitKeymanagerConfig{ListenForChanges: false}) - require.NoError(t, err) + srv := setupServerWithWallet(t) - s := &Server{ - keymanager: km, - walletInitialized: true, - wallet: w, - } - numAccounts := 50 - dr, ok := km.(*derived.Keymanager) + // We recover 3 accounts from a test mnemonic. 
+ numAccounts := 3 + dr, ok := srv.keymanager.(*derived.Keymanager) require.Equal(t, true, ok) - err = dr.RecoverAccountsFromMnemonic(ctx, mocks.TestMnemonic, "", numAccounts) + err := dr.RecoverAccountsFromMnemonic(ctx, mocks.TestMnemonic, "", numAccounts) require.NoError(t, err) - - publicKeys, err := km.FetchValidatingPublicKeys(ctx) + publicKeys, err := dr.FetchValidatingPublicKeys(ctx) require.NoError(t, err) - require.Equal(t, numAccounts, len(publicKeys)) // Create a validator database. validatorDB, err := kv.NewKVStore(ctx, defaultWalletPath, &kv.Config{ PubKeys: publicKeys, }) require.NoError(t, err) - s.valDB = validatorDB + srv.valDB = validatorDB // Have to close it after import is done otherwise it complains db is not open. defer func() { @@ -248,39 +242,115 @@ func TestServer_DeleteKeystores(t *testing.T) { encoded, err := json.Marshal(mockJSON) require.NoError(t, err) - _, err = s.ImportSlashingProtection(ctx, &validatorpb.ImportSlashingProtectionRequest{ + _, err = srv.ImportSlashingProtection(ctx, &validatorpb.ImportSlashingProtectionRequest{ SlashingProtectionJson: string(encoded), }) require.NoError(t, err) - rawPubKeys := make([][]byte, numAccounts) - for i := 0; i < numAccounts; i++ { - rawPubKeys[i] = publicKeys[i][:] + + // For ease of test setup, we'll give each public key a string identifier. + publicKeysWithId := map[string][48]byte{ + "a": publicKeys[0], + "b": publicKeys[1], + "c": publicKeys[2], } - // Deletes properly and returns slashing protection history. 
- resp, err := s.DeleteKeystores(ctx, ðpbservice.DeleteKeystoresRequest{ - PublicKeys: rawPubKeys, - }) - require.NoError(t, err) - require.Equal(t, numAccounts, len(resp.Statuses)) - for _, status := range resp.Statuses { - require.Equal(t, ethpbservice.DeletedKeystoreStatus_DELETED, status.Status) + type keyCase struct { + id string + wantProtectionData bool } - publicKeys, err = km.FetchValidatingPublicKeys(ctx) - require.NoError(t, err) - require.Equal(t, 0, len(publicKeys)) - require.Equal(t, numAccounts, len(mockJSON.Data)) + tests := []struct { + keys []*keyCase + wantStatuses []ethpbservice.DeletedKeystoreStatus_Status + }{ + { + keys: []*keyCase{ + {id: "a", wantProtectionData: true}, + {id: "a", wantProtectionData: true}, + {id: "d"}, + {id: "c", wantProtectionData: true}, + }, + wantStatuses: []ethpbservice.DeletedKeystoreStatus_Status{ + ethpbservice.DeletedKeystoreStatus_DELETED, + ethpbservice.DeletedKeystoreStatus_NOT_ACTIVE, + ethpbservice.DeletedKeystoreStatus_NOT_FOUND, + ethpbservice.DeletedKeystoreStatus_DELETED, + }, + }, + { + keys: []*keyCase{ + {id: "a", wantProtectionData: true}, + {id: "c", wantProtectionData: true}, + }, + wantStatuses: []ethpbservice.DeletedKeystoreStatus_Status{ + ethpbservice.DeletedKeystoreStatus_NOT_ACTIVE, + ethpbservice.DeletedKeystoreStatus_NOT_ACTIVE, + }, + }, + { + keys: []*keyCase{ + {id: "x"}, + }, + wantStatuses: []ethpbservice.DeletedKeystoreStatus_Status{ + ethpbservice.DeletedKeystoreStatus_NOT_FOUND, + }, + }, + } + for _, tc := range tests { + keys := make([][]byte, len(tc.keys)) + for i := 0; i < len(tc.keys); i++ { + pk := publicKeysWithId[tc.keys[i].id] + keys[i] = pk[:] + } + resp, err := srv.DeleteKeystores(ctx, ðpbservice.DeleteKeystoresRequest{PublicKeys: keys}) + require.NoError(t, err) + require.Equal(t, len(keys), len(resp.Statuses)) + slashingProtectionData := &format.EIPSlashingProtectionFormat{} + require.NoError(t, json.Unmarshal([]byte(resp.SlashingProtection), slashingProtectionData)) + 
require.Equal(t, true, len(slashingProtectionData.Data) > 0) - // Returns slashing protection history if already deleted. - resp, err = s.DeleteKeystores(ctx, ðpbservice.DeleteKeystoresRequest{ - PublicKeys: rawPubKeys, + for i := 0; i < len(tc.keys); i++ { + require.Equal( + t, + tc.wantStatuses[i], + resp.Statuses[i].Status, + fmt.Sprintf("Checking status for key %s", tc.keys[i].id), + ) + if tc.keys[i].wantProtectionData { + // We check that we can find the key in the slashing protection data. + var found bool + for _, dt := range slashingProtectionData.Data { + if dt.Pubkey == fmt.Sprintf("%#x", keys[i]) { + found = true + break + } + } + require.Equal(t, true, found) + } + } + } +} + +func setupServerWithWallet(t testing.TB) *Server { + ctx := context.Background() + localWalletDir := setupWalletDir(t) + defaultWalletPath = localWalletDir + w, err := accounts.CreateWalletWithKeymanager(ctx, &accounts.CreateWalletConfig{ + WalletCfg: &wallet.Config{ + WalletDir: defaultWalletPath, + KeymanagerKind: keymanager.Derived, + WalletPassword: strongPass, + }, + SkipMnemonicConfirm: true, }) require.NoError(t, err) - require.Equal(t, numAccounts, len(resp.Statuses)) - for _, status := range resp.Statuses { - require.Equal(t, ethpbservice.DeletedKeystoreStatus_NOT_FOUND, status.Status) + km, err := w.InitializeKeymanager(ctx, iface.InitKeymanagerConfig{ListenForChanges: false}) + require.NoError(t, err) + + return &Server{ + keymanager: km, + walletInitialized: true, + wallet: w, } - require.Equal(t, numAccounts, len(mockJSON.Data)) } func createRandomKeystore(t testing.TB, password string) *keymanager.Keystore { diff --git a/validator/slashing-protection-history/export.go b/validator/slashing-protection-history/export.go index a90af606b4..b7929378ac 100644 --- a/validator/slashing-protection-history/export.go +++ b/validator/slashing-protection-history/export.go @@ -21,10 +21,6 @@ func ExportStandardProtectionJSON( validatorDB db.Database, filteredKeys ...[]byte, ) 
(*format.EIPSlashingProtectionFormat, error) { - keysFilterMap := make(map[string]bool, len(filteredKeys)) - for _, k := range filteredKeys { - keysFilterMap[string(k)] = true - } interchangeJSON := &format.EIPSlashingProtectionFormat{} genesisValidatorsRoot, err := validatorDB.GenesisValidatorsRoot(ctx) if err != nil { @@ -42,6 +38,12 @@ func ExportStandardProtectionJSON( interchangeJSON.Metadata.GenesisValidatorsRoot = genesisRootHex interchangeJSON.Metadata.InterchangeFormatVersion = format.InterchangeFormatVersion + // Allow for filtering data for the keys we wish to export. + filteredKeysMap := make(map[string]bool, len(filteredKeys)) + for _, k := range filteredKeys { + filteredKeysMap[string(k)] = true + } + // Extract the existing public keys in our database. proposedPublicKeys, err := validatorDB.ProposedPublicKeys(ctx) if err != nil { @@ -58,7 +60,7 @@ func ExportStandardProtectionJSON( len(proposedPublicKeys), "Extracting signed blocks by validator public key", ) for _, pubKey := range proposedPublicKeys { - if _, ok := keysFilterMap[string(pubKey[:])]; len(filteredKeys) > 0 && !ok { + if _, ok := filteredKeysMap[string(pubKey[:])]; len(filteredKeys) > 0 && !ok { continue } pubKeyHex, err := pubKeyToHexString(pubKey[:]) @@ -84,7 +86,7 @@ func ExportStandardProtectionJSON( len(attestedPublicKeys), "Extracting signed attestations by validator public key", ) for _, pubKey := range attestedPublicKeys { - if _, ok := keysFilterMap[string(pubKey[:])]; len(filteredKeys) > 0 && !ok { + if _, ok := filteredKeysMap[string(pubKey[:])]; len(filteredKeys) > 0 && !ok { continue } pubKeyHex, err := pubKeyToHexString(pubKey[:]) diff --git a/validator/slashing-protection-history/round_trip_test.go b/validator/slashing-protection-history/round_trip_test.go index f782f90a91..a87362b9f2 100644 --- a/validator/slashing-protection-history/round_trip_test.go +++ b/validator/slashing-protection-history/round_trip_test.go @@ -40,6 +40,11 @@ func TestImportExport_RoundTrip(t 
*testing.T) { err = history.ImportStandardProtectionJSON(ctx, validatorDB, buf) require.NoError(t, err) + rawPublicKeys := make([][]byte, numValidators) + for i := 0; i < numValidators; i++ { + rawPublicKeys[i] = publicKeys[i][:] + } + // Next up, we export our slashing protection database into the EIP standard file. // Next, we attempt to import it into our validator database. eipStandard, err := history.ExportStandardProtectionJSON(ctx, validatorDB) @@ -114,6 +119,11 @@ func TestImportExport_RoundTrip_SkippedAttestationEpochs(t *testing.T) { err = history.ImportStandardProtectionJSON(ctx, validatorDB, buf) require.NoError(t, err) + rawPublicKeys := make([][]byte, numValidators) + for i := 0; i < numValidators; i++ { + rawPublicKeys[i] = pubKeys[i][:] + } + // Next up, we export our slashing protection database into the EIP standard file. // Next, we attempt to import it into our validator database. eipStandard, err := history.ExportStandardProtectionJSON(ctx, validatorDB) @@ -157,6 +167,7 @@ func TestImportExport_FilterKeys(t *testing.T) { for i := 0; i < len(rawKeys); i++ { rawKeys[i] = publicKeys[i][:] } + eipStandard, err := history.ExportStandardProtectionJSON(ctx, validatorDB, rawKeys...) require.NoError(t, err)