Compare commits

...

20 Commits

Author SHA1 Message Date
nisdas ed26181236 Add Readme 2024-06-24 16:18:47 +08:00
nisdas 96c0e15c13 Update Dashboard 2024-06-24 16:09:34 +08:00
nisdas 287d247322 Lint 2024-06-24 12:16:27 +08:00
nisdas 47ab93d594 Add new builder metric 2024-06-24 09:55:25 +08:00
nisdas 8a0aa474a6 Add new builder metric 2024-06-24 08:37:24 +08:00
nisdas 048c8de4a7 Terence's Request 2024-06-23 22:57:08 +08:00
nisdas 5877eaa8d0 Add ZkSync 2024-06-23 22:25:58 +08:00
nisdas d5c4e91147 Add dashboard 2024-06-23 15:15:58 +08:00
nisdas 152d72af4b Add new blobs gauge 2024-06-23 12:48:22 +08:00
nisdas cffa012852 Change to Counter 2024-06-23 12:44:47 +08:00
nisdas 4f8c595b52 Include Block Metric 2024-06-23 12:42:40 +08:00
nisdas 03555a15cb Add Error 2024-06-22 19:57:13 +08:00
nisdas f7dc13d10a Formatting 2024-06-22 19:55:22 +08:00
nisdas bb12cd169b Add Linea 2024-06-22 19:47:02 +08:00
nisdas 2401b8a13b Reduce Cardinality 2024-06-22 19:34:03 +08:00
nisdas 4f172eae68 Add Metrics Server 2024-06-22 19:23:49 +08:00
nisdas ce6c9907c4 Add Blobwatcher 2024-06-22 18:12:09 +08:00
kasey 8cd249c1c8 update codegen dep and cleanup organization (#14127) 2024-06-20 23:33:54 +00:00
    Co-authored-by: Kasey Kirkham <kasey@users.noreply.github.com>
Preston Van Loon 305d5850e7 ssz: Move stateutil.SliceRoot to ssz package (#14123) 2024-06-20 20:55:15 +00:00
Radosław Kapka df3a9f218d More tracing in the validator client (#14125) 2024-06-20 16:13:53 +00:00
    * More tracing in the validator client
    * change context expectation in tests
85 changed files with 21600 additions and 21787 deletions

View File

@@ -69,7 +69,6 @@ go_library(
"@com_github_gorilla_mux//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_urfave_cli_v2//:go_default_library",
],

View File

@@ -4,7 +4,6 @@ import (
"fmt"
"github.com/ethereum/go-ethereum/common"
fastssz "github.com/prysmaticlabs/fastssz"
"github.com/prysmaticlabs/prysm/v5/cmd"
"github.com/prysmaticlabs/prysm/v5/cmd/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/v5/config/params"
@@ -197,7 +196,3 @@ func configureExecutionSetting(cliCtx *cli.Context) error {
" Default fee recipient will be used as a fall back", checksumAddress.Hex())
return params.SetActive(c)
}
func configureFastSSZHashingAlgorithm() {
fastssz.EnableVectorizedHTR = true
}

View File

@@ -277,8 +277,6 @@ func configureBeacon(cliCtx *cli.Context) error {
return errors.Wrap(err, "could not configure execution setting")
}
configureFastSSZHashingAlgorithm()
return nil
}

View File

@@ -16,7 +16,6 @@ go_library(
"pending_consolidations_root.go",
"pending_partial_withdrawals_root.go",
"reference.go",
"slice_root.go",
"sync_committee.root.go",
"trie_helpers.go",
"unrealized_justification.go",

View File

@@ -2,9 +2,10 @@ package stateutil
import (
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/encoding/ssz"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)
func HistoricalSummariesRoot(summaries []*ethpb.HistoricalSummary) ([32]byte, error) {
return SliceRoot(summaries, fieldparams.HistoricalRootsLength)
return ssz.SliceRoot(summaries, fieldparams.HistoricalRootsLength)
}

View File

@@ -2,9 +2,10 @@ package stateutil
import (
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/encoding/ssz"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)
func PendingBalanceDepositsRoot(slice []*ethpb.PendingBalanceDeposit) ([32]byte, error) {
return SliceRoot(slice, fieldparams.PendingBalanceDepositsLimit)
return ssz.SliceRoot(slice, fieldparams.PendingBalanceDepositsLimit)
}

View File

@@ -2,9 +2,10 @@ package stateutil
import (
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/encoding/ssz"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)
func PendingConsolidationsRoot(slice []*ethpb.PendingConsolidation) ([32]byte, error) {
return SliceRoot(slice, fieldparams.PendingConsolidationsLimit)
return ssz.SliceRoot(slice, fieldparams.PendingConsolidationsLimit)
}

View File

@@ -2,9 +2,10 @@ package stateutil
import (
fieldparams "github.com/prysmaticlabs/prysm/v5/config/fieldparams"
"github.com/prysmaticlabs/prysm/v5/encoding/ssz"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
)
func PendingPartialWithdrawalsRoot(slice []*ethpb.PendingPartialWithdrawal) ([32]byte, error) {
return SliceRoot(slice, fieldparams.PendingPartialWithdrawalsLimit)
return ssz.SliceRoot(slice, fieldparams.PendingPartialWithdrawalsLimit)
}

View File

@@ -2973,8 +2973,8 @@ def prysm_deps():
go_repository(
name = "com_github_prysmaticlabs_fastssz",
importpath = "github.com/prysmaticlabs/fastssz",
sum = "h1:c3p3UzV4vFA7xaCDphnDWOjpxcadrQ26l5b+ypsvyxo=",
version = "v0.0.0-20221107182844-78142813af44",
sum = "h1:0LZAwwHnsZFfXm4IK4rzFV4N5IVSKZKLmuBMA4kAlFk=",
version = "v0.0.0-20240620202422-a981b8ef89d3",
)
go_repository(
name = "com_github_prysmaticlabs_go_bitfield",

View File

@@ -7,6 +7,7 @@ go_library(
"helpers.go",
"htrutils.go",
"merkleize.go",
"slice_root.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/encoding/ssz",
visibility = ["//visibility:public"],

View File

@@ -144,51 +144,13 @@ func WithdrawalSliceRoot(withdrawals []*enginev1.Withdrawal, limit uint64) ([32]
// DepositRequestsSliceRoot computes the HTR of a slice of deposit receipts.
// The limit parameter is used as input to the bitwise merkleization algorithm.
func DepositRequestsSliceRoot(depositRequests []*enginev1.DepositRequest, limit uint64) ([32]byte, error) {
roots := make([][32]byte, len(depositRequests))
for i := 0; i < len(depositRequests); i++ {
r, err := depositRequests[i].HashTreeRoot()
if err != nil {
return [32]byte{}, err
}
roots[i] = r
}
bytesRoot, err := BitwiseMerkleize(roots, uint64(len(roots)), limit)
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not compute merkleization")
}
bytesRootBuf := new(bytes.Buffer)
if err := binary.Write(bytesRootBuf, binary.LittleEndian, uint64(len(depositRequests))); err != nil {
return [32]byte{}, errors.Wrap(err, "could not marshal length")
}
bytesRootBufRoot := make([]byte, 32)
copy(bytesRootBufRoot, bytesRootBuf.Bytes())
return MixInLength(bytesRoot, bytesRootBufRoot), nil
return SliceRoot(depositRequests, limit)
}
// WithdrawalRequestsSliceRoot computes the HTR of a slice of withdrawal requests from the EL.
// The limit parameter is used as input to the bitwise merkleization algorithm.
func WithdrawalRequestsSliceRoot(withdrawalRequests []*enginev1.WithdrawalRequest, limit uint64) ([32]byte, error) {
roots := make([][32]byte, len(withdrawalRequests))
for i := 0; i < len(withdrawalRequests); i++ {
r, err := withdrawalRequests[i].HashTreeRoot()
if err != nil {
return [32]byte{}, err
}
roots[i] = r
}
bytesRoot, err := BitwiseMerkleize(roots, uint64(len(roots)), limit)
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not compute merkleization")
}
bytesRootBuf := new(bytes.Buffer)
if err := binary.Write(bytesRootBuf, binary.LittleEndian, uint64(len(withdrawalRequests))); err != nil {
return [32]byte{}, errors.Wrap(err, "could not marshal length")
}
bytesRootBufRoot := make([]byte, 32)
copy(bytesRootBufRoot, bytesRootBuf.Bytes())
return MixInLength(bytesRoot, bytesRootBufRoot), nil
return SliceRoot(withdrawalRequests, limit)
}
// ByteSliceRoot is a helper func to merkleize an arbitrary List[Byte, N]

View File

@@ -1,4 +1,4 @@
package stateutil
package ssz
import (
"bytes"
@@ -6,11 +6,10 @@ import (
"fmt"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v5/encoding/ssz"
)
// SliceRoot computes the root of a slice of hashable objects.
func SliceRoot[T ssz.Hashable](slice []T, limit uint64) ([32]byte, error) {
func SliceRoot[T Hashable](slice []T, limit uint64) ([32]byte, error) {
max := limit
if uint64(len(slice)) > max {
return [32]byte{}, fmt.Errorf("slice exceeds max length %d", max)
@@ -25,7 +24,7 @@ func SliceRoot[T ssz.Hashable](slice []T, limit uint64) ([32]byte, error) {
roots[i] = r
}
sliceRoot, err := ssz.BitwiseMerkleize(roots, uint64(len(roots)), limit)
sliceRoot, err := BitwiseMerkleize(roots, uint64(len(roots)), limit)
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not slice merkleization")
}
@@ -36,6 +35,5 @@ func SliceRoot[T ssz.Hashable](slice []T, limit uint64) ([32]byte, error) {
// We need to mix in the length of the slice.
sliceLenRoot := make([]byte, 32)
copy(sliceLenRoot, sliceLenBuf.Bytes())
res := ssz.MixInLength(sliceRoot, sliceLenRoot)
return res, nil
return MixInLength(sliceRoot, sliceLenRoot), nil
}
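
For context, the hunk above is the move itself: SliceRoot leaves the stateutil package and becomes a generic helper in encoding/ssz, which is what lets the engine htrutils functions and the stateutil root functions shown earlier collapse into one-line calls. A minimal caller sketch follows, assuming the Hashable constraint only requires HashTreeRoot() ([32]byte, error) and using an illustrative limit rather than a spec constant:

```go
package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/v5/encoding/ssz"
	enginev1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
)

func main() {
	// Any slice whose element type satisfies the ssz.Hashable constraint
	// (assumed here to mean "has HashTreeRoot() ([32]byte, error)") can be
	// rooted with the relocated generic helper. The limit is the SSZ list
	// capacity that gets mixed into the final root; 8192 below is purely
	// illustrative, not the spec constant for deposit requests.
	requests := []*enginev1.DepositRequest{}
	root, err := ssz.SliceRoot(requests, 8192)
	if err != nil {
		panic(err)
	}
	fmt.Printf("deposit requests root: %#x\n", root)
}
```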

go.mod (2 changed lines)
View File

@@ -65,7 +65,7 @@ require (
github.com/prometheus/client_golang v1.19.1
github.com/prometheus/client_model v0.6.1
github.com/prometheus/prom2json v1.3.0
github.com/prysmaticlabs/fastssz v0.0.0-20221107182844-78142813af44
github.com/prysmaticlabs/fastssz v0.0.0-20240620202422-a981b8ef89d3
github.com/prysmaticlabs/go-bitfield v0.0.0-20240328144219-a1caa50c3a1e
github.com/prysmaticlabs/prombbolt v0.0.0-20210126082820-9b7adba6db7c
github.com/prysmaticlabs/protoc-gen-go-cast v0.0.0-20230228205207-28762a7b9294

go.sum (4 changed lines)
View File

@@ -966,8 +966,8 @@ github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3c
github.com/prometheus/prom2json v1.3.0 h1:BlqrtbT9lLH3ZsOVhXPsHzFrApCTKRifB7gjJuypu6Y=
github.com/prometheus/prom2json v1.3.0/go.mod h1:rMN7m0ApCowcoDlypBHlkNbp5eJQf/+1isKykIP5ZnM=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/prysmaticlabs/fastssz v0.0.0-20221107182844-78142813af44 h1:c3p3UzV4vFA7xaCDphnDWOjpxcadrQ26l5b+ypsvyxo=
github.com/prysmaticlabs/fastssz v0.0.0-20221107182844-78142813af44/go.mod h1:MA5zShstUwCQaE9faGHgCGvEWUbG87p4SAXINhmCkvg=
github.com/prysmaticlabs/fastssz v0.0.0-20240620202422-a981b8ef89d3 h1:0LZAwwHnsZFfXm4IK4rzFV4N5IVSKZKLmuBMA4kAlFk=
github.com/prysmaticlabs/fastssz v0.0.0-20240620202422-a981b8ef89d3/go.mod h1:h2OlIZD/M6wFvV3YMZbW16lFgh3Rsye00G44J2cwLyU=
github.com/prysmaticlabs/go-bitfield v0.0.0-20210108222456-8e92c3709aa0/go.mod h1:hCwmef+4qXWjv0jLDbQdWnL0Ol7cS7/lCSS26WR+u6s=
github.com/prysmaticlabs/go-bitfield v0.0.0-20240328144219-a1caa50c3a1e h1:ATgOe+abbzfx9kCPeXIW4fiWyDdxlwHw07j8UGhdTd4=
github.com/prysmaticlabs/go-bitfield v0.0.0-20240328144219-a1caa50c3a1e/go.mod h1:wmuf/mdK4VMD+jA9ThwcUKjg3a2XWM9cVfFYjDyY4j4=

View File

@@ -152,7 +152,7 @@
"external/.*": "Third party code",
"rules_go_work-.*": "Third party code",
".*\\.pb.*.go": "Generated code is ok",
".*generated\\.ssz\\.go": "Generated code is ok"
".*\\.ssz\\.go": "Generated code is ok"
}
},
"properpermissions": {
@@ -180,7 +180,7 @@
"external/.*": "Third party code",
"rules_go_work-.*": "Third party code",
".*\\.pb.*.go": "Generated code is ok",
".*generated\\.ssz\\.go": "Generated code is ok",
".*\\.ssz\\.go": "Generated code is ok",
".*_test\\.go": "Tests are ok (for now)",
"tools/analyzers/ineffassign/ineffassign\\.go": "3rd party code with a massive switch statement"
}

View File

@@ -31,6 +31,7 @@ proto_library(
ssz_gen_marshal(
name = "ssz_generated_files",
go_proto = ":go_proto",
out = "engine.ssz.go",
includes = [
"//consensus-types/primitives:go_default_library",
],

View File

@@ -190,7 +190,7 @@ func (e *ExecutionPayload) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}
if o10 < 508 {
if o10 != 508 {
return ssz.ErrInvalidVariableOffset
}
@@ -336,11 +336,7 @@ func (e *ExecutionPayload) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
hh.PutBytes(e.ExtraData)
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(elemIndx, byteLen, (32+31)/32)
} else {
hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32)
}
hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32)
}
// Field (11) 'BaseFeePerGas'
@@ -374,25 +370,13 @@ func (e *ExecutionPayload) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
hh.AppendBytes32(elem)
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(elemIndx, byteLen, (1073741824+31)/32)
} else {
hh.MerkleizeWithMixin(elemIndx, byteLen, (1073741824+31)/32)
}
hh.MerkleizeWithMixin(elemIndx, byteLen, (1073741824+31)/32)
}
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(subIndx, num, 1048576)
} else {
hh.MerkleizeWithMixin(subIndx, num, 1048576)
}
hh.MerkleizeWithMixin(subIndx, num, 1048576)
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -594,7 +578,7 @@ func (e *ExecutionPayloadCapella) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}
if o10 < 512 {
if o10 != 512 {
return ssz.ErrInvalidVariableOffset
}
@@ -766,11 +750,7 @@ func (e *ExecutionPayloadCapella) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
hh.PutBytes(e.ExtraData)
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(elemIndx, byteLen, (32+31)/32)
} else {
hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32)
}
hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32)
}
// Field (11) 'BaseFeePerGas'
@@ -804,18 +784,10 @@ func (e *ExecutionPayloadCapella) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
hh.AppendBytes32(elem)
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(elemIndx, byteLen, (1073741824+31)/32)
} else {
hh.MerkleizeWithMixin(elemIndx, byteLen, (1073741824+31)/32)
}
hh.MerkleizeWithMixin(elemIndx, byteLen, (1073741824+31)/32)
}
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(subIndx, num, 1048576)
} else {
hh.MerkleizeWithMixin(subIndx, num, 1048576)
}
hh.MerkleizeWithMixin(subIndx, num, 1048576)
}
// Field (14) 'Withdrawals'
@@ -831,18 +803,10 @@ func (e *ExecutionPayloadCapella) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(subIndx, num, 16)
} else {
hh.MerkleizeWithMixin(subIndx, num, 16)
}
hh.MerkleizeWithMixin(subIndx, num, 16)
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -1050,7 +1014,7 @@ func (e *ExecutionPayloadDeneb) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}
if o10 < 528 {
if o10 != 528 {
return ssz.ErrInvalidVariableOffset
}
@@ -1228,11 +1192,7 @@ func (e *ExecutionPayloadDeneb) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
hh.PutBytes(e.ExtraData)
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(elemIndx, byteLen, (32+31)/32)
} else {
hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32)
}
hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32)
}
// Field (11) 'BaseFeePerGas'
@@ -1266,18 +1226,10 @@ func (e *ExecutionPayloadDeneb) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
hh.AppendBytes32(elem)
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(elemIndx, byteLen, (1073741824+31)/32)
} else {
hh.MerkleizeWithMixin(elemIndx, byteLen, (1073741824+31)/32)
}
hh.MerkleizeWithMixin(elemIndx, byteLen, (1073741824+31)/32)
}
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(subIndx, num, 1048576)
} else {
hh.MerkleizeWithMixin(subIndx, num, 1048576)
}
hh.MerkleizeWithMixin(subIndx, num, 1048576)
}
// Field (14) 'Withdrawals'
@@ -1293,11 +1245,7 @@ func (e *ExecutionPayloadDeneb) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(subIndx, num, 16)
} else {
hh.MerkleizeWithMixin(subIndx, num, 16)
}
hh.MerkleizeWithMixin(subIndx, num, 16)
}
// Field (15) 'BlobGasUsed'
@@ -1306,11 +1254,7 @@ func (e *ExecutionPayloadDeneb) HashTreeRootWith(hh *ssz.Hasher) (err error) {
// Field (16) 'ExcessBlobGas'
hh.PutUint64(e.ExcessBlobGas)
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -1548,7 +1492,7 @@ func (e *ExecutionPayloadElectra) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}
if o10 < 536 {
if o10 != 536 {
return ssz.ErrInvalidVariableOffset
}
@@ -1778,11 +1722,7 @@ func (e *ExecutionPayloadElectra) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
hh.PutBytes(e.ExtraData)
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(elemIndx, byteLen, (32+31)/32)
} else {
hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32)
}
hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32)
}
// Field (11) 'BaseFeePerGas'
@@ -1816,18 +1756,10 @@ func (e *ExecutionPayloadElectra) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
hh.AppendBytes32(elem)
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(elemIndx, byteLen, (1073741824+31)/32)
} else {
hh.MerkleizeWithMixin(elemIndx, byteLen, (1073741824+31)/32)
}
hh.MerkleizeWithMixin(elemIndx, byteLen, (1073741824+31)/32)
}
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(subIndx, num, 1048576)
} else {
hh.MerkleizeWithMixin(subIndx, num, 1048576)
}
hh.MerkleizeWithMixin(subIndx, num, 1048576)
}
// Field (14) 'Withdrawals'
@@ -1843,11 +1775,7 @@ func (e *ExecutionPayloadElectra) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(subIndx, num, 16)
} else {
hh.MerkleizeWithMixin(subIndx, num, 16)
}
hh.MerkleizeWithMixin(subIndx, num, 16)
}
// Field (15) 'BlobGasUsed'
@@ -1869,11 +1797,7 @@ func (e *ExecutionPayloadElectra) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(subIndx, num, 8192)
} else {
hh.MerkleizeWithMixin(subIndx, num, 8192)
}
hh.MerkleizeWithMixin(subIndx, num, 8192)
}
// Field (18) 'WithdrawalRequests'
@@ -1889,18 +1813,10 @@ func (e *ExecutionPayloadElectra) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(subIndx, num, 16)
} else {
hh.MerkleizeWithMixin(subIndx, num, 16)
}
hh.MerkleizeWithMixin(subIndx, num, 16)
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -2067,7 +1983,7 @@ func (e *ExecutionPayloadHeader) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}
if o10 < 536 {
if o10 != 536 {
return ssz.ErrInvalidVariableOffset
}
@@ -2185,11 +2101,7 @@ func (e *ExecutionPayloadHeader) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
hh.PutBytes(e.ExtraData)
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(elemIndx, byteLen, (32+31)/32)
} else {
hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32)
}
hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32)
}
// Field (11) 'BaseFeePerGas'
@@ -2213,11 +2125,7 @@ func (e *ExecutionPayloadHeader) HashTreeRootWith(hh *ssz.Hasher) (err error) {
}
hh.PutBytes(e.TransactionsRoot)
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -2391,7 +2299,7 @@ func (e *ExecutionPayloadHeaderCapella) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}
if o10 < 568 {
if o10 != 568 {
return ssz.ErrInvalidVariableOffset
}
@@ -2515,11 +2423,7 @@ func (e *ExecutionPayloadHeaderCapella) HashTreeRootWith(hh *ssz.Hasher) (err er
return
}
hh.PutBytes(e.ExtraData)
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(elemIndx, byteLen, (32+31)/32)
} else {
hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32)
}
hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32)
}
// Field (11) 'BaseFeePerGas'
@@ -2550,11 +2454,7 @@ func (e *ExecutionPayloadHeaderCapella) HashTreeRootWith(hh *ssz.Hasher) (err er
}
hh.PutBytes(e.WithdrawalsRoot)
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -2734,7 +2634,7 @@ func (e *ExecutionPayloadHeaderDeneb) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}
if o10 < 584 {
if o10 != 584 {
return ssz.ErrInvalidVariableOffset
}
@@ -2864,11 +2764,7 @@ func (e *ExecutionPayloadHeaderDeneb) HashTreeRootWith(hh *ssz.Hasher) (err erro
return
}
hh.PutBytes(e.ExtraData)
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(elemIndx, byteLen, (32+31)/32)
} else {
hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32)
}
hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32)
}
// Field (11) 'BaseFeePerGas'
@@ -2905,11 +2801,7 @@ func (e *ExecutionPayloadHeaderDeneb) HashTreeRootWith(hh *ssz.Hasher) (err erro
// Field (16) 'ExcessBlobGas'
hh.PutUint64(e.ExcessBlobGas)
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -3103,7 +2995,7 @@ func (e *ExecutionPayloadHeaderElectra) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}
if o10 < 648 {
if o10 != 648 {
return ssz.ErrInvalidVariableOffset
}
@@ -3245,11 +3137,7 @@ func (e *ExecutionPayloadHeaderElectra) HashTreeRootWith(hh *ssz.Hasher) (err er
return
}
hh.PutBytes(e.ExtraData)
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(elemIndx, byteLen, (32+31)/32)
} else {
hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32)
}
hh.MerkleizeWithMixin(elemIndx, byteLen, (32+31)/32)
}
// Field (11) 'BaseFeePerGas'
@@ -3300,11 +3188,7 @@ func (e *ExecutionPayloadHeaderElectra) HashTreeRootWith(hh *ssz.Hasher) (err er
}
hh.PutBytes(e.WithdrawalRequestsRoot)
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -3393,11 +3277,7 @@ func (w *Withdrawal) HashTreeRootWith(hh *ssz.Hasher) (err error) {
// Field (3) 'Amount'
hh.PutUint64(w.Amount)
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -3481,7 +3361,7 @@ func (b *BlobsBundle) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}
if o0 < 12 {
if o0 != 12 {
return ssz.ErrInvalidVariableOffset
}
@@ -3586,11 +3466,7 @@ func (b *BlobsBundle) HashTreeRootWith(hh *ssz.Hasher) (err error) {
}
numItems := uint64(len(b.KzgCommitments))
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(subIndx, numItems, 4096)
} else {
hh.MerkleizeWithMixin(subIndx, numItems, 4096)
}
hh.MerkleizeWithMixin(subIndx, numItems, 4096)
}
// Field (1) 'Proofs'
@@ -3609,11 +3485,7 @@ func (b *BlobsBundle) HashTreeRootWith(hh *ssz.Hasher) (err error) {
}
numItems := uint64(len(b.Proofs))
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(subIndx, numItems, 4096)
} else {
hh.MerkleizeWithMixin(subIndx, numItems, 4096)
}
hh.MerkleizeWithMixin(subIndx, numItems, 4096)
}
// Field (2) 'Blobs'
@@ -3632,18 +3504,10 @@ func (b *BlobsBundle) HashTreeRootWith(hh *ssz.Hasher) (err error) {
}
numItems := uint64(len(b.Blobs))
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(subIndx, numItems, 4096)
} else {
hh.MerkleizeWithMixin(subIndx, numItems, 4096)
}
hh.MerkleizeWithMixin(subIndx, numItems, 4096)
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -3734,11 +3598,7 @@ func (w *WithdrawalRequest) HashTreeRootWith(hh *ssz.Hasher) (err error) {
// Field (2) 'Amount'
hh.PutUint64(w.Amount)
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -3858,10 +3718,6 @@ func (d *DepositRequest) HashTreeRootWith(hh *ssz.Hasher) (err error) {
// Field (4) 'Index'
hh.PutUint64(d.Index)
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}

View File

@@ -35,6 +35,7 @@ load("//tools:ssz.bzl", "SSZ_DEPS", "ssz_gen_marshal")
ssz_gen_marshal(
name = "ssz_generated_files",
go_proto = ":go_proto",
out = "gateway.ssz.go",
includes = [
"//consensus-types/primitives:go_default_library",
"//proto/engine/v1:go_default_library",

View File

@@ -1,5 +1,5 @@
// Code generated by fastssz. DO NOT EDIT.
// Hash: d06a72227c2f5e350916cce3e89f4e855135a2a22f6ea263dedc68fa506c1ba7
// Hash: 2874e1dadeb47411763f48fe31e5daaa91ac663e796933d9a508c2e7be94fa5e
package v1
import (
@@ -62,7 +62,7 @@ func (a *Attestation) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}
if o0 < 228 {
if o0 != 228 {
return ssz.ErrInvalidVariableOffset
}
@@ -132,11 +132,7 @@ func (a *Attestation) HashTreeRootWith(hh *ssz.Hasher) (err error) {
}
hh.PutBytes(a.Signature)
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -194,7 +190,7 @@ func (a *AggregateAttestationAndProof) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}
if o1 < 108 {
if o1 != 108 {
return ssz.ErrInvalidVariableOffset
}
@@ -254,11 +250,7 @@ func (a *AggregateAttestationAndProof) HashTreeRootWith(hh *ssz.Hasher) (err err
}
hh.PutBytes(a.SelectionProof)
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -310,7 +302,7 @@ func (s *SignedAggregateAttestationAndProof) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}
if o0 < 100 {
if o0 != 100 {
return ssz.ErrInvalidVariableOffset
}
@@ -367,11 +359,7 @@ func (s *SignedAggregateAttestationAndProof) HashTreeRootWith(hh *ssz.Hasher) (e
}
hh.PutBytes(s.Signature)
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -493,11 +481,7 @@ func (a *AttestationData) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -568,11 +552,7 @@ func (c *Checkpoint) HashTreeRootWith(hh *ssz.Hasher) (err error) {
}
hh.PutBytes(c.Root)
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -655,7 +635,7 @@ func (b *BeaconBlock) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}
if o4 < 84 {
if o4 != 84 {
return ssz.ErrInvalidVariableOffset
}
@@ -719,11 +699,7 @@ func (b *BeaconBlock) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -775,7 +751,7 @@ func (s *SignedBeaconBlock) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}
if o0 < 100 {
if o0 != 100 {
return ssz.ErrInvalidVariableOffset
}
@@ -832,11 +808,7 @@ func (s *SignedBeaconBlock) HashTreeRootWith(hh *ssz.Hasher) (err error) {
}
hh.PutBytes(s.Signature)
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -1006,7 +978,7 @@ func (b *BeaconBlockBody) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}
if o3 < 220 {
if o3 != 220 {
return ssz.ErrInvalidVariableOffset
}
@@ -1199,11 +1171,7 @@ func (b *BeaconBlockBody) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(subIndx, num, 16)
} else {
hh.MerkleizeWithMixin(subIndx, num, 16)
}
hh.MerkleizeWithMixin(subIndx, num, 16)
}
// Field (4) 'AttesterSlashings'
@@ -1219,11 +1187,7 @@ func (b *BeaconBlockBody) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(subIndx, num, 2)
} else {
hh.MerkleizeWithMixin(subIndx, num, 2)
}
hh.MerkleizeWithMixin(subIndx, num, 2)
}
// Field (5) 'Attestations'
@@ -1239,11 +1203,7 @@ func (b *BeaconBlockBody) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(subIndx, num, 128)
} else {
hh.MerkleizeWithMixin(subIndx, num, 128)
}
hh.MerkleizeWithMixin(subIndx, num, 128)
}
// Field (6) 'Deposits'
@@ -1259,11 +1219,7 @@ func (b *BeaconBlockBody) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(subIndx, num, 16)
} else {
hh.MerkleizeWithMixin(subIndx, num, 16)
}
hh.MerkleizeWithMixin(subIndx, num, 16)
}
// Field (7) 'VoluntaryExits'
@@ -1279,18 +1235,10 @@ func (b *BeaconBlockBody) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(subIndx, num, 16)
} else {
hh.MerkleizeWithMixin(subIndx, num, 16)
}
hh.MerkleizeWithMixin(subIndx, num, 16)
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -1374,11 +1322,7 @@ func (p *ProposerSlashing) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -1435,7 +1379,7 @@ func (a *AttesterSlashing) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}
if o0 < 8 {
if o0 != 8 {
return ssz.ErrInvalidVariableOffset
}
@@ -1506,11 +1450,7 @@ func (a *AttesterSlashing) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -1604,12 +1544,7 @@ func (d *Deposit) HashTreeRootWith(hh *ssz.Hasher) (err error) {
}
hh.Append(i)
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(subIndx)
} else {
hh.Merkleize(subIndx)
}
hh.Merkleize(subIndx)
}
// Field (1) 'Data'
@@ -1617,11 +1552,7 @@ func (d *Deposit) HashTreeRootWith(hh *ssz.Hasher) (err error) {
return
}
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -1681,11 +1612,7 @@ func (v *VoluntaryExit) HashTreeRootWith(hh *ssz.Hasher) (err error) {
// Field (1) 'ValidatorIndex'
hh.PutUint64(uint64(v.ValidatorIndex))
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -1768,11 +1695,7 @@ func (s *SignedVoluntaryExit) HashTreeRootWith(hh *ssz.Hasher) (err error) {
}
hh.PutBytes(s.Signature)
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -1863,11 +1786,7 @@ func (e *Eth1Data) HashTreeRootWith(hh *ssz.Hasher) (err error) {
}
hh.PutBytes(e.BlockHash)
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -1987,11 +1906,7 @@ func (b *BeaconBlockHeader) HashTreeRootWith(hh *ssz.Hasher) (err error) {
}
hh.PutBytes(b.BodyRoot)
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -2074,11 +1989,7 @@ func (s *SignedBeaconBlockHeader) HashTreeRootWith(hh *ssz.Hasher) (err error) {
}
hh.PutBytes(s.Signature)
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -2139,7 +2050,7 @@ func (i *IndexedAttestation) UnmarshalSSZ(buf []byte) error {
return ssz.ErrOffset
}
if o0 < 228 {
if o0 != 228 {
return ssz.ErrInvalidVariableOffset
}
@@ -2204,11 +2115,7 @@ func (i *IndexedAttestation) HashTreeRootWith(hh *ssz.Hasher) (err error) {
hh.FillUpTo32()
numItems := uint64(len(i.AttestingIndices))
if ssz.EnableVectorizedHTR {
hh.MerkleizeWithMixinVectorizedHTR(subIndx, numItems, ssz.CalculateLimit(2048, numItems, 8))
} else {
hh.MerkleizeWithMixin(subIndx, numItems, ssz.CalculateLimit(2048, numItems, 8))
}
hh.MerkleizeWithMixin(subIndx, numItems, ssz.CalculateLimit(2048, numItems, 8))
}
// Field (1) 'Data'
@@ -2223,11 +2130,7 @@ func (i *IndexedAttestation) HashTreeRootWith(hh *ssz.Hasher) (err error) {
}
hh.PutBytes(i.Signature)
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -2309,11 +2212,7 @@ func (s *SyncAggregate) HashTreeRootWith(hh *ssz.Hasher) (err error) {
}
hh.PutBytes(s.SyncCommitteeSignature)
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -2424,11 +2323,7 @@ func (d *Deposit_Data) HashTreeRootWith(hh *ssz.Hasher) (err error) {
}
hh.PutBytes(d.Signature)
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}
@@ -2564,10 +2459,6 @@ func (v *Validator) HashTreeRootWith(hh *ssz.Hasher) (err error) {
// Field (7) 'WithdrawableEpoch'
hh.PutUint64(uint64(v.WithdrawableEpoch))
if ssz.EnableVectorizedHTR {
hh.MerkleizeVectorizedHTR(indx)
} else {
hh.Merkleize(indx)
}
hh.Merkleize(indx)
return
}

View File

@@ -31,6 +31,7 @@ load("//tools:ssz.bzl", "SSZ_DEPS", "ssz_gen_marshal")
ssz_gen_marshal(
name = "ssz_generated_files",
go_proto = ":go_proto",
out = "grpc.ssz.go",
includes = [
"//consensus-types/primitives:go_default_library",
"//proto/engine/v1:go_default_library",

File diff suppressed because it is too large

View File

@@ -45,111 +45,211 @@ proto_library(
##############################################################################
# Go
##############################################################################
ssz_phase0_objs = [
"AggregateAttestationAndProof",
"Attestation",
"AttestationData",
"AttesterSlashing",
"BeaconBlock",
"BeaconBlockHeader",
"BeaconState",
"Checkpoint",
"Deposit",
"Deposit_Data",
"DepositMessage",
"ENRForkID",
"Eth1Data",
"Fork",
"ForkData",
"HistoricalBatch",
"IndexedAttestation",
"PowBlock",
"ProposerSlashing",
"SignedAggregateAttestationAndProof",
"SignedBeaconBlock",
"SignedBeaconBlockHeader",
"SignedVoluntaryExit",
"SigningData",
"Status",
"Status",
"Validator",
"VoluntaryExit",
]
ssz_altair_objs = [
"BeaconBlockAltair",
"BeaconBlockBodyAltair",
"BeaconStateAltair",
"ContributionAndProof",
"SignedBeaconBlockAltair",
"SignedContributionAndProof",
"SyncAggregate",
"SyncAggregate",
"SyncAggregatorSelectionData",
"SyncCommittee",
"SyncCommitteeContribution",
"SyncCommitteeMessage",
]
ssz_bellatrix_objs = [
"BeaconBlockBellatrix",
"BeaconBlockBodyBellatrix",
"BeaconStateBellatrix",
"BlindedBeaconBlockBellatrix",
"BlindedBeaconBlockBodyBellatrix",
"SignedBeaconBlockBellatrix",
"SignedBlindedBeaconBlockBellatrix",
]
ssz_capella_objs = [
"BLSToExecutionChange",
"BeaconBlockBodyCapella",
"BeaconBlockCapella",
"BeaconStateCapella",
"BlindedBeaconBlockBodyCapella",
"BlindedBeaconBlockCapella",
"BuilderBidCapella",
"HistoricalSummary",
"SignedBLSToExecutionChange",
"SignedBeaconBlockCapella",
"SignedBlindedBeaconBlockCapella",
"Withdrawal",
]
ssz_deneb_objs = [
"BeaconBlockBodyDeneb",
"BeaconBlockContentsDeneb",
"BeaconBlockDeneb",
"BeaconStateDeneb",
"BlindedBeaconBlockBodyDeneb",
"BlindedBeaconBlockDeneb",
"BlobIdentifier",
"BlobSidecar",
"BlobSidecars",
"BuilderBidDeneb",
"SignedBeaconBlockContentsDeneb",
"SignedBeaconBlockDeneb",
"SignedBlindedBeaconBlockDeneb",
]
ssz_electra_objs = [
"AggregateAttestationAndProofElectra",
"AttestationElectra",
"AttesterSlashingElectra",
"BeaconBlockElectra",
"BeaconBlockElectra",
"BeaconStateElectra",
"BlindedBeaconBlockBodyElectra",
"BlindedBeaconBlockElectra",
"Consolidation",
"IndexedAttestationElectra",
"PendingBalanceDeposit",
"PendingBalanceDeposits",
"PendingConsolidation",
"PendingPartialWithdrawal",
"SignedAggregateAttestationAndProofElectra",
"SignedBeaconBlockElectra",
"SignedBlindedBeaconBlockElectra",
"SignedConsolidation",
]
ssz_gen_marshal(
name = "ssz_generated_files",
name = "ssz_generated_phase0",
go_proto = ":go_proto",
out = "phase0.ssz.go",
includes = [
"//consensus-types/primitives:go_default_library",
"//proto/engine/v1:go_default_library",
"//math:go_default_library",
],
objs = ssz_phase0_objs,
)
ssz_gen_marshal(
name = "ssz_generated_altair",
go_proto = ":go_proto",
out = "altair.ssz.go",
includes = [
"//consensus-types/primitives:go_default_library",
"//proto/engine/v1:go_default_library",
"//math:go_default_library",
],
objs = ssz_altair_objs,
exclude_objs = ssz_phase0_objs,
)
ssz_gen_marshal(
name = "ssz_generated_bellatrix",
go_proto = ":go_proto",
out = "bellatrix.ssz.go",
includes = [
"//consensus-types/primitives:go_default_library",
"//proto/engine/v1:go_default_library",
"//math:go_default_library",
],
objs = ssz_bellatrix_objs,
exclude_objs = ssz_phase0_objs + ssz_altair_objs,
)
ssz_gen_marshal(
name = "ssz_generated_capella",
go_proto = ":go_proto",
out = "capella.ssz.go",
includes = [
"//consensus-types/primitives:go_default_library",
"//proto/engine/v1:go_default_library",
"//math:go_default_library",
],
objs = ssz_capella_objs,
exclude_objs = ssz_phase0_objs + ssz_altair_objs + ssz_bellatrix_objs,
)
ssz_gen_marshal(
name = "ssz_generated_deneb",
go_proto = ":go_proto",
out = "deneb.ssz.go",
includes = [
"//consensus-types/primitives:go_default_library",
"//proto/engine/v1:go_default_library",
"//math:go_default_library",
],
objs = ssz_deneb_objs,
exclude_objs = ssz_phase0_objs + ssz_altair_objs + ssz_bellatrix_objs + ssz_capella_objs,
)
ssz_gen_marshal(
name = "ssz_generated_electra",
go_proto = ":go_proto",
out = "electra.ssz.go",
includes = [
"//consensus-types/primitives:go_default_library",
"//proto/engine/v1:go_default_library",
"//math:go_default_library",
],
objs = ssz_electra_objs,
exclude_objs = ssz_phase0_objs + ssz_altair_objs + ssz_bellatrix_objs + ssz_capella_objs + ssz_deneb_objs,
)
ssz_gen_marshal(
name = "ssz_generated_non_core",
go_proto = ":go_proto",
out = "non-core.ssz.go",
includes = [
"//consensus-types/primitives:go_default_library",
"//proto/engine/v1:go_default_library",
"//math:go_default_library",
],
objs = [
"BeaconBlockAltair",
"BeaconBlockBodyAltair",
"SignedBeaconBlockAltair",
"BeaconBlockBellatrix",
"BeaconBlockBodyBellatrix",
"SignedBeaconBlockBellatrix",
"SignedBlindedBeaconBlockBellatrix",
"BlindedBeaconBlockBellatrix",
"BlindedBeaconBlockBodyBellatrix",
"BeaconBlockCapella",
"BeaconBlockBodyCapella",
"SignedBeaconBlockCapella",
"BeaconBlockDeneb",
"BeaconBlockBodyDeneb",
"SignedBeaconBlockDeneb",
"BeaconBlockElectra",
"BeaconBlockElectra",
"SignedBeaconBlockElectra",
"SignedBlindedBeaconBlockCapella",
"BlindedBeaconBlockCapella",
"BlindedBeaconBlockBodyCapella",
"SignedBlindedBeaconBlockDeneb",
"BeaconBlockContentsDeneb",
"SignedBeaconBlockContentsDeneb",
"BlindedBeaconBlockDeneb",
"BlindedBeaconBlockBodyDeneb",
"SignedBlindedBeaconBlockElectra",
"BlindedBeaconBlockElectra",
"BlindedBeaconBlockBodyElectra",
"SyncAggregate",
"SyncCommitteeMessage",
"SyncCommitteeContribution",
"ContributionAndProof",
"SignedContributionAndProof",
"BeaconBlocksByRangeRequest",
"BlobSidecarsByRangeRequest",
"ENRForkID",
"MetaDataV0",
"MetaDataV1",
"Status",
"AggregateAttestationAndProof",
"AggregateAttestationAndProofElectra",
"Attestation",
"AttestationElectra",
"AttestationData",
"AttesterSlashing",
"AttesterSlashingElectra",
"BeaconBlock",
"BeaconBlockHeader",
"Checkpoint",
"Deposit",
"Eth1Data",
"IndexedAttestation",
"IndexedAttestationElectra",
"ProposerSlashing",
"SignedAggregateAttestationAndProof",
"SignedAggregateAttestationAndProofElectra",
"SignedBeaconBlock",
"SignedBeaconBlockHeader",
"SignedVoluntaryExit",
"Validator",
"VoluntaryExit",
"ContributionAndProof",
"SignedContributionAndProof",
"DepositMessage",
"Fork",
"ForkData",
"HistoricalBatch",
"Status",
"BeaconState",
"BeaconStateAltair",
"BeaconStateBellatrix",
"BeaconStateCapella",
"BeaconStateDeneb",
"BeaconStateElectra",
"SigningData",
"SyncCommittee",
"SyncAggregatorSelectionData",
"PowBlock",
"SignedValidatorRegistrationV1",
"ValidatorRegistrationV1",
"Withdrawal",
"BLSToExecutionChange",
"SignedBLSToExecutionChange",
"BuilderBid",
"BuilderBidCapella",
"BuilderBidDeneb",
"BlobSidecar",
"BlobSidecars",
"BlobIdentifier",
"DepositSnapshot",
"PendingBalanceDeposit",
"PendingPartialWithdrawal",
"Consolidation",
"SignedConsolidation",
"PendingConsolidation",
],
)
@@ -212,7 +312,13 @@ go_library(
"eip_7251.go",
"sync_committee_mainnet.go",
"sync_committee_minimal.go", # keep
":ssz_generated_files", # keep
":ssz_generated_non_core", # keep
":ssz_generated_phase0", # keep
":ssz_generated_altair", # keep
":ssz_generated_bellatrix", # keep
":ssz_generated_capella", # keep
":ssz_generated_deneb", # keep
":ssz_generated_electra", # keep
],
embed = [
":go_grpc_gateway_library",

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,780 @@
// Code generated by fastssz. DO NOT EDIT.
// Hash: 9da8a498bd074553137a73197dadcae4d3b4239484f64bab4f0a734dce528d24
package eth
import (
ssz "github.com/prysmaticlabs/fastssz"
github_com_prysmaticlabs_prysm_v5_consensus_types_primitives "github.com/prysmaticlabs/prysm/v5/consensus-types/primitives"
v1 "github.com/prysmaticlabs/prysm/v5/proto/engine/v1"
)
// MarshalSSZ ssz marshals the ValidatorRegistrationV1 object
func (v *ValidatorRegistrationV1) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(v)
}
// MarshalSSZTo ssz marshals the ValidatorRegistrationV1 object to a target array
func (v *ValidatorRegistrationV1) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
// Field (0) 'FeeRecipient'
if size := len(v.FeeRecipient); size != 20 {
err = ssz.ErrBytesLengthFn("--.FeeRecipient", size, 20)
return
}
dst = append(dst, v.FeeRecipient...)
// Field (1) 'GasLimit'
dst = ssz.MarshalUint64(dst, v.GasLimit)
// Field (2) 'Timestamp'
dst = ssz.MarshalUint64(dst, v.Timestamp)
// Field (3) 'Pubkey'
if size := len(v.Pubkey); size != 48 {
err = ssz.ErrBytesLengthFn("--.Pubkey", size, 48)
return
}
dst = append(dst, v.Pubkey...)
return
}
// UnmarshalSSZ ssz unmarshals the ValidatorRegistrationV1 object
func (v *ValidatorRegistrationV1) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size != 84 {
return ssz.ErrSize
}
// Field (0) 'FeeRecipient'
if cap(v.FeeRecipient) == 0 {
v.FeeRecipient = make([]byte, 0, len(buf[0:20]))
}
v.FeeRecipient = append(v.FeeRecipient, buf[0:20]...)
// Field (1) 'GasLimit'
v.GasLimit = ssz.UnmarshallUint64(buf[20:28])
// Field (2) 'Timestamp'
v.Timestamp = ssz.UnmarshallUint64(buf[28:36])
// Field (3) 'Pubkey'
if cap(v.Pubkey) == 0 {
v.Pubkey = make([]byte, 0, len(buf[36:84]))
}
v.Pubkey = append(v.Pubkey, buf[36:84]...)
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the ValidatorRegistrationV1 object
func (v *ValidatorRegistrationV1) SizeSSZ() (size int) {
size = 84
return
}
// HashTreeRoot ssz hashes the ValidatorRegistrationV1 object
func (v *ValidatorRegistrationV1) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(v)
}
// HashTreeRootWith ssz hashes the ValidatorRegistrationV1 object with a hasher
func (v *ValidatorRegistrationV1) HashTreeRootWith(hh *ssz.Hasher) (err error) {
indx := hh.Index()
// Field (0) 'FeeRecipient'
if size := len(v.FeeRecipient); size != 20 {
err = ssz.ErrBytesLengthFn("--.FeeRecipient", size, 20)
return
}
hh.PutBytes(v.FeeRecipient)
// Field (1) 'GasLimit'
hh.PutUint64(v.GasLimit)
// Field (2) 'Timestamp'
hh.PutUint64(v.Timestamp)
// Field (3) 'Pubkey'
if size := len(v.Pubkey); size != 48 {
err = ssz.ErrBytesLengthFn("--.Pubkey", size, 48)
return
}
hh.PutBytes(v.Pubkey)
hh.Merkleize(indx)
return
}
// MarshalSSZ ssz marshals the SignedValidatorRegistrationV1 object
func (s *SignedValidatorRegistrationV1) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(s)
}
// MarshalSSZTo ssz marshals the SignedValidatorRegistrationV1 object to a target array
func (s *SignedValidatorRegistrationV1) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
// Field (0) 'Message'
if s.Message == nil {
s.Message = new(ValidatorRegistrationV1)
}
if dst, err = s.Message.MarshalSSZTo(dst); err != nil {
return
}
// Field (1) 'Signature'
if size := len(s.Signature); size != 96 {
err = ssz.ErrBytesLengthFn("--.Signature", size, 96)
return
}
dst = append(dst, s.Signature...)
return
}
// UnmarshalSSZ ssz unmarshals the SignedValidatorRegistrationV1 object
func (s *SignedValidatorRegistrationV1) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size != 180 {
return ssz.ErrSize
}
// Field (0) 'Message'
if s.Message == nil {
s.Message = new(ValidatorRegistrationV1)
}
if err = s.Message.UnmarshalSSZ(buf[0:84]); err != nil {
return err
}
// Field (1) 'Signature'
if cap(s.Signature) == 0 {
s.Signature = make([]byte, 0, len(buf[84:180]))
}
s.Signature = append(s.Signature, buf[84:180]...)
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the SignedValidatorRegistrationV1 object
func (s *SignedValidatorRegistrationV1) SizeSSZ() (size int) {
size = 180
return
}
// HashTreeRoot ssz hashes the SignedValidatorRegistrationV1 object
func (s *SignedValidatorRegistrationV1) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(s)
}
// HashTreeRootWith ssz hashes the SignedValidatorRegistrationV1 object with a hasher
func (s *SignedValidatorRegistrationV1) HashTreeRootWith(hh *ssz.Hasher) (err error) {
indx := hh.Index()
// Field (0) 'Message'
if err = s.Message.HashTreeRootWith(hh); err != nil {
return
}
// Field (1) 'Signature'
if size := len(s.Signature); size != 96 {
err = ssz.ErrBytesLengthFn("--.Signature", size, 96)
return
}
hh.PutBytes(s.Signature)
hh.Merkleize(indx)
return
}
// MarshalSSZ ssz marshals the BuilderBid object
func (b *BuilderBid) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(b)
}
// MarshalSSZTo ssz marshals the BuilderBid object to a target array
func (b *BuilderBid) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
offset := int(84)
// Offset (0) 'Header'
dst = ssz.WriteOffset(dst, offset)
if b.Header == nil {
b.Header = new(v1.ExecutionPayloadHeader)
}
offset += b.Header.SizeSSZ()
// Field (1) 'Value'
if size := len(b.Value); size != 32 {
err = ssz.ErrBytesLengthFn("--.Value", size, 32)
return
}
dst = append(dst, b.Value...)
// Field (2) 'Pubkey'
if size := len(b.Pubkey); size != 48 {
err = ssz.ErrBytesLengthFn("--.Pubkey", size, 48)
return
}
dst = append(dst, b.Pubkey...)
// Field (0) 'Header'
if dst, err = b.Header.MarshalSSZTo(dst); err != nil {
return
}
return
}
// UnmarshalSSZ ssz unmarshals the BuilderBid object
func (b *BuilderBid) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size < 84 {
return ssz.ErrSize
}
tail := buf
var o0 uint64
// Offset (0) 'Header'
if o0 = ssz.ReadOffset(buf[0:4]); o0 > size {
return ssz.ErrOffset
}
if o0 != 84 {
return ssz.ErrInvalidVariableOffset
}
// Field (1) 'Value'
if cap(b.Value) == 0 {
b.Value = make([]byte, 0, len(buf[4:36]))
}
b.Value = append(b.Value, buf[4:36]...)
// Field (2) 'Pubkey'
if cap(b.Pubkey) == 0 {
b.Pubkey = make([]byte, 0, len(buf[36:84]))
}
b.Pubkey = append(b.Pubkey, buf[36:84]...)
// Field (0) 'Header'
{
buf = tail[o0:]
if b.Header == nil {
b.Header = new(v1.ExecutionPayloadHeader)
}
if err = b.Header.UnmarshalSSZ(buf); err != nil {
return err
}
}
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the BuilderBid object
func (b *BuilderBid) SizeSSZ() (size int) {
size = 84
// Field (0) 'Header'
if b.Header == nil {
b.Header = new(v1.ExecutionPayloadHeader)
}
size += b.Header.SizeSSZ()
return
}
// HashTreeRoot ssz hashes the BuilderBid object
func (b *BuilderBid) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(b)
}
// HashTreeRootWith ssz hashes the BuilderBid object with a hasher
func (b *BuilderBid) HashTreeRootWith(hh *ssz.Hasher) (err error) {
indx := hh.Index()
// Field (0) 'Header'
if err = b.Header.HashTreeRootWith(hh); err != nil {
return
}
// Field (1) 'Value'
if size := len(b.Value); size != 32 {
err = ssz.ErrBytesLengthFn("--.Value", size, 32)
return
}
hh.PutBytes(b.Value)
// Field (2) 'Pubkey'
if size := len(b.Pubkey); size != 48 {
err = ssz.ErrBytesLengthFn("--.Pubkey", size, 48)
return
}
hh.PutBytes(b.Pubkey)
hh.Merkleize(indx)
return
}
// MarshalSSZ ssz marshals the BeaconBlocksByRangeRequest object
func (b *BeaconBlocksByRangeRequest) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(b)
}
// MarshalSSZTo ssz marshals the BeaconBlocksByRangeRequest object to a target array
func (b *BeaconBlocksByRangeRequest) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
// Field (0) 'StartSlot'
dst = ssz.MarshalUint64(dst, uint64(b.StartSlot))
// Field (1) 'Count'
dst = ssz.MarshalUint64(dst, b.Count)
// Field (2) 'Step'
dst = ssz.MarshalUint64(dst, b.Step)
return
}
// UnmarshalSSZ ssz unmarshals the BeaconBlocksByRangeRequest object
func (b *BeaconBlocksByRangeRequest) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size != 24 {
return ssz.ErrSize
}
// Field (0) 'StartSlot'
b.StartSlot = github_com_prysmaticlabs_prysm_v5_consensus_types_primitives.Slot(ssz.UnmarshallUint64(buf[0:8]))
// Field (1) 'Count'
b.Count = ssz.UnmarshallUint64(buf[8:16])
// Field (2) 'Step'
b.Step = ssz.UnmarshallUint64(buf[16:24])
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the BeaconBlocksByRangeRequest object
func (b *BeaconBlocksByRangeRequest) SizeSSZ() (size int) {
size = 24
return
}
// HashTreeRoot ssz hashes the BeaconBlocksByRangeRequest object
func (b *BeaconBlocksByRangeRequest) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(b)
}
// HashTreeRootWith ssz hashes the BeaconBlocksByRangeRequest object with a hasher
func (b *BeaconBlocksByRangeRequest) HashTreeRootWith(hh *ssz.Hasher) (err error) {
indx := hh.Index()
// Field (0) 'StartSlot'
hh.PutUint64(uint64(b.StartSlot))
// Field (1) 'Count'
hh.PutUint64(b.Count)
// Field (2) 'Step'
hh.PutUint64(b.Step)
hh.Merkleize(indx)
return
}
// MarshalSSZ ssz marshals the MetaDataV0 object
func (m *MetaDataV0) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(m)
}
// MarshalSSZTo ssz marshals the MetaDataV0 object to a target array
func (m *MetaDataV0) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
// Field (0) 'SeqNumber'
dst = ssz.MarshalUint64(dst, m.SeqNumber)
// Field (1) 'Attnets'
if size := len(m.Attnets); size != 8 {
err = ssz.ErrBytesLengthFn("--.Attnets", size, 8)
return
}
dst = append(dst, m.Attnets...)
return
}
// UnmarshalSSZ ssz unmarshals the MetaDataV0 object
func (m *MetaDataV0) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size != 16 {
return ssz.ErrSize
}
// Field (0) 'SeqNumber'
m.SeqNumber = ssz.UnmarshallUint64(buf[0:8])
// Field (1) 'Attnets'
if cap(m.Attnets) == 0 {
m.Attnets = make([]byte, 0, len(buf[8:16]))
}
m.Attnets = append(m.Attnets, buf[8:16]...)
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the MetaDataV0 object
func (m *MetaDataV0) SizeSSZ() (size int) {
size = 16
return
}
// HashTreeRoot ssz hashes the MetaDataV0 object
func (m *MetaDataV0) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(m)
}
// HashTreeRootWith ssz hashes the MetaDataV0 object with a hasher
func (m *MetaDataV0) HashTreeRootWith(hh *ssz.Hasher) (err error) {
indx := hh.Index()
// Field (0) 'SeqNumber'
hh.PutUint64(m.SeqNumber)
// Field (1) 'Attnets'
if size := len(m.Attnets); size != 8 {
err = ssz.ErrBytesLengthFn("--.Attnets", size, 8)
return
}
hh.PutBytes(m.Attnets)
hh.Merkleize(indx)
return
}
// MarshalSSZ ssz marshals the MetaDataV1 object
func (m *MetaDataV1) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(m)
}
// MarshalSSZTo ssz marshals the MetaDataV1 object to a target array
func (m *MetaDataV1) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
// Field (0) 'SeqNumber'
dst = ssz.MarshalUint64(dst, m.SeqNumber)
// Field (1) 'Attnets'
if size := len(m.Attnets); size != 8 {
err = ssz.ErrBytesLengthFn("--.Attnets", size, 8)
return
}
dst = append(dst, m.Attnets...)
// Field (2) 'Syncnets'
if size := len(m.Syncnets); size != 1 {
err = ssz.ErrBytesLengthFn("--.Syncnets", size, 1)
return
}
dst = append(dst, m.Syncnets...)
return
}
// UnmarshalSSZ ssz unmarshals the MetaDataV1 object
func (m *MetaDataV1) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size != 17 {
return ssz.ErrSize
}
// Field (0) 'SeqNumber'
m.SeqNumber = ssz.UnmarshallUint64(buf[0:8])
// Field (1) 'Attnets'
if cap(m.Attnets) == 0 {
m.Attnets = make([]byte, 0, len(buf[8:16]))
}
m.Attnets = append(m.Attnets, buf[8:16]...)
// Field (2) 'Syncnets'
if cap(m.Syncnets) == 0 {
m.Syncnets = make([]byte, 0, len(buf[16:17]))
}
m.Syncnets = append(m.Syncnets, buf[16:17]...)
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the MetaDataV1 object
func (m *MetaDataV1) SizeSSZ() (size int) {
size = 17
return
}
// HashTreeRoot ssz hashes the MetaDataV1 object
func (m *MetaDataV1) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(m)
}
// HashTreeRootWith ssz hashes the MetaDataV1 object with a hasher
func (m *MetaDataV1) HashTreeRootWith(hh *ssz.Hasher) (err error) {
indx := hh.Index()
// Field (0) 'SeqNumber'
hh.PutUint64(m.SeqNumber)
// Field (1) 'Attnets'
if size := len(m.Attnets); size != 8 {
err = ssz.ErrBytesLengthFn("--.Attnets", size, 8)
return
}
hh.PutBytes(m.Attnets)
// Field (2) 'Syncnets'
if size := len(m.Syncnets); size != 1 {
err = ssz.ErrBytesLengthFn("--.Syncnets", size, 1)
return
}
hh.PutBytes(m.Syncnets)
hh.Merkleize(indx)
return
}
// MarshalSSZ ssz marshals the BlobSidecarsByRangeRequest object
func (b *BlobSidecarsByRangeRequest) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(b)
}
// MarshalSSZTo ssz marshals the BlobSidecarsByRangeRequest object to a target array
func (b *BlobSidecarsByRangeRequest) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
// Field (0) 'StartSlot'
dst = ssz.MarshalUint64(dst, uint64(b.StartSlot))
// Field (1) 'Count'
dst = ssz.MarshalUint64(dst, b.Count)
return
}
// UnmarshalSSZ ssz unmarshals the BlobSidecarsByRangeRequest object
func (b *BlobSidecarsByRangeRequest) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size != 16 {
return ssz.ErrSize
}
// Field (0) 'StartSlot'
b.StartSlot = github_com_prysmaticlabs_prysm_v5_consensus_types_primitives.Slot(ssz.UnmarshallUint64(buf[0:8]))
// Field (1) 'Count'
b.Count = ssz.UnmarshallUint64(buf[8:16])
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the BlobSidecarsByRangeRequest object
func (b *BlobSidecarsByRangeRequest) SizeSSZ() (size int) {
size = 16
return
}
// HashTreeRoot ssz hashes the BlobSidecarsByRangeRequest object
func (b *BlobSidecarsByRangeRequest) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(b)
}
// HashTreeRootWith ssz hashes the BlobSidecarsByRangeRequest object with a hasher
func (b *BlobSidecarsByRangeRequest) HashTreeRootWith(hh *ssz.Hasher) (err error) {
indx := hh.Index()
// Field (0) 'StartSlot'
hh.PutUint64(uint64(b.StartSlot))
// Field (1) 'Count'
hh.PutUint64(b.Count)
hh.Merkleize(indx)
return
}
// MarshalSSZ ssz marshals the DepositSnapshot object
func (d *DepositSnapshot) MarshalSSZ() ([]byte, error) {
return ssz.MarshalSSZ(d)
}
// MarshalSSZTo ssz marshals the DepositSnapshot object to a target array
func (d *DepositSnapshot) MarshalSSZTo(buf []byte) (dst []byte, err error) {
dst = buf
offset := int(84)
// Offset (0) 'Finalized'
dst = ssz.WriteOffset(dst, offset)
offset += len(d.Finalized) * 32
// Field (1) 'DepositRoot'
if size := len(d.DepositRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.DepositRoot", size, 32)
return
}
dst = append(dst, d.DepositRoot...)
// Field (2) 'DepositCount'
dst = ssz.MarshalUint64(dst, d.DepositCount)
// Field (3) 'ExecutionHash'
if size := len(d.ExecutionHash); size != 32 {
err = ssz.ErrBytesLengthFn("--.ExecutionHash", size, 32)
return
}
dst = append(dst, d.ExecutionHash...)
// Field (4) 'ExecutionDepth'
dst = ssz.MarshalUint64(dst, d.ExecutionDepth)
// Field (0) 'Finalized'
if size := len(d.Finalized); size > 32 {
err = ssz.ErrListTooBigFn("--.Finalized", size, 32)
return
}
for ii := 0; ii < len(d.Finalized); ii++ {
if size := len(d.Finalized[ii]); size != 32 {
err = ssz.ErrBytesLengthFn("--.Finalized[ii]", size, 32)
return
}
dst = append(dst, d.Finalized[ii]...)
}
return
}
// UnmarshalSSZ ssz unmarshals the DepositSnapshot object
func (d *DepositSnapshot) UnmarshalSSZ(buf []byte) error {
var err error
size := uint64(len(buf))
if size < 84 {
return ssz.ErrSize
}
tail := buf
var o0 uint64
// Offset (0) 'Finalized'
if o0 = ssz.ReadOffset(buf[0:4]); o0 > size {
return ssz.ErrOffset
}
if o0 != 84 {
return ssz.ErrInvalidVariableOffset
}
// Field (1) 'DepositRoot'
if cap(d.DepositRoot) == 0 {
d.DepositRoot = make([]byte, 0, len(buf[4:36]))
}
d.DepositRoot = append(d.DepositRoot, buf[4:36]...)
// Field (2) 'DepositCount'
d.DepositCount = ssz.UnmarshallUint64(buf[36:44])
// Field (3) 'ExecutionHash'
if cap(d.ExecutionHash) == 0 {
d.ExecutionHash = make([]byte, 0, len(buf[44:76]))
}
d.ExecutionHash = append(d.ExecutionHash, buf[44:76]...)
// Field (4) 'ExecutionDepth'
d.ExecutionDepth = ssz.UnmarshallUint64(buf[76:84])
// Field (0) 'Finalized'
{
buf = tail[o0:]
num, err := ssz.DivideInt2(len(buf), 32, 32)
if err != nil {
return err
}
d.Finalized = make([][]byte, num)
for ii := 0; ii < num; ii++ {
if cap(d.Finalized[ii]) == 0 {
d.Finalized[ii] = make([]byte, 0, len(buf[ii*32:(ii+1)*32]))
}
d.Finalized[ii] = append(d.Finalized[ii], buf[ii*32:(ii+1)*32]...)
}
}
return err
}
// SizeSSZ returns the ssz encoded size in bytes for the DepositSnapshot object
func (d *DepositSnapshot) SizeSSZ() (size int) {
size = 84
// Field (0) 'Finalized'
size += len(d.Finalized) * 32
return
}
// HashTreeRoot ssz hashes the DepositSnapshot object
func (d *DepositSnapshot) HashTreeRoot() ([32]byte, error) {
return ssz.HashWithDefaultHasher(d)
}
// HashTreeRootWith ssz hashes the DepositSnapshot object with a hasher
func (d *DepositSnapshot) HashTreeRootWith(hh *ssz.Hasher) (err error) {
indx := hh.Index()
// Field (0) 'Finalized'
{
if size := len(d.Finalized); size > 32 {
err = ssz.ErrListTooBigFn("--.Finalized", size, 32)
return
}
subIndx := hh.Index()
for _, i := range d.Finalized {
if len(i) != 32 {
err = ssz.ErrBytesLength
return
}
hh.Append(i)
}
numItems := uint64(len(d.Finalized))
hh.MerkleizeWithMixin(subIndx, numItems, 32)
}
// Field (1) 'DepositRoot'
if size := len(d.DepositRoot); size != 32 {
err = ssz.ErrBytesLengthFn("--.DepositRoot", size, 32)
return
}
hh.PutBytes(d.DepositRoot)
// Field (2) 'DepositCount'
hh.PutUint64(d.DepositCount)
// Field (3) 'ExecutionHash'
if size := len(d.ExecutionHash); size != 32 {
err = ssz.ErrBytesLengthFn("--.ExecutionHash", size, 32)
return
}
hh.PutBytes(d.ExecutionHash)
// Field (4) 'ExecutionDepth'
hh.PutUint64(d.ExecutionDepth)
hh.Merkleize(indx)
return
}

File diff suppressed because it is too large


@@ -0,0 +1,32 @@
load("@io_bazel_rules_go//go:def.bzl", "go_binary")
load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"main.go",
"metrics.go",
],
importpath = "github.com/prysmaticlabs/prysm/v5/tools/blob-watcher",
visibility = ["//visibility:private"],
deps = [
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_ethereum_go_ethereum//consensus/misc/eip4844:go_default_library",
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
"@com_github_ethereum_go_ethereum//ethclient:go_default_library",
"@com_github_ethereum_go_ethereum//ethclient/gethclient:go_default_library",
"@com_github_ethereum_go_ethereum//params:go_default_library",
"@com_github_ethereum_go_ethereum//rpc:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promhttp:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)
go_binary(
name = "blob-watcher",
embed = [":go_default_library"],
visibility = ["//visibility:public"],
)


@@ -0,0 +1,26 @@
# BlobWatcher
BlobWatcher is a tool that monitors your execution client's mempool for blob transactions
and measures how long they take to get included. Data tracked by the tool:
- Base fee monitoring for both blobs and the network
- Accounts propagating blob transactions, with labels applied for popular rollups
- Builder monitoring by the number of blob transactions included
- Transaction pool monitoring for blobs
This tool currently only works with a websocket endpoint; a minimal sketch of the required subscription flow follows the flag list below.
```
bazel run //tools/blob-watcher:blob-watcher -- --execution-endpoint ws://localhost:8546 --metrics-endpoint localhost:8080
```
Flags:
```
-execution-endpoint string
    Path to websocket endpoint for execution client. (default "ws://localhost:8546")
-metrics-endpoint string
    Path for our metrics server. (default "localhost:8080")
-origin-secret string
    Origin string for websocket connection
```
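For reference, the websocket requirement comes from the full pending-transaction subscription the watcher builds on. The sketch below is a minimal illustration of that flow only, not the full tool: it assumes the default `ws://localhost:8546` endpoint with an empty origin string, and simply logs blob transactions as they appear in the mempool.
```
package main

import (
	"context"
	"log"

	gethtypes "github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethclient/gethclient"
	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Full pending-transaction subscriptions are only served over websockets,
	// which is why a plain HTTP endpoint cannot be used here.
	client, err := rpc.DialWebsocket(context.Background(), "ws://localhost:8546", "")
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	txs := make(chan *gethtypes.Transaction, 100)
	sub, err := gethclient.New(client).SubscribeFullPendingTransactions(context.Background(), txs)
	if err != nil {
		log.Fatal(err)
	}
	defer sub.Unsubscribe()

	// Log every blob-carrying transaction observed in the mempool.
	for tx := range txs {
		if tx.Type() == gethtypes.BlobTxType {
			log.Printf("blob tx %s carrying %d blob(s)", tx.Hash(), len(tx.BlobHashes()))
		}
	}
}
```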


@@ -0,0 +1,704 @@
{
"annotations": {
"list": [
{
"builtIn": 1,
"datasource": {
"type": "grafana",
"uid": "-- Grafana --"
},
"enable": true,
"hide": true,
"iconColor": "rgba(0, 211, 255, 1)",
"name": "Annotations & Alerts",
"type": "dashboard"
}
]
},
"editable": true,
"fiscalYearStartMonth": 0,
"graphTooltip": 0,
"id": 1,
"links": [],
"panels": [
{
"datasource": {
"type": "prometheus",
"uid": "cdpm0r0qz6328a"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "points",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 0
},
"id": 5,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"maxHeight": 600,
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "cdpm0r0qz6328a"
},
"editorMode": "code",
"expr": "increase(blob_inclusion_by_builder[30s]) / increase(builder_blocks[30s])",
"instant": false,
"interval": "",
"legendFormat": "{{builder}}",
"range": true,
"refId": "A"
}
],
"title": "Builder Blob Inclusion",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "cdpm0r0qz6328a"
},
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "points",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 2,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 0
},
"id": 6,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"maxHeight": 600,
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "cdpm0r0qz6328a"
},
"editorMode": "code",
"expr": "avg by (builder) (blob_inclusion_by_builder / builder_blocks)",
"instant": false,
"interval": "",
"legendFormat": "Average-{{builder}}",
"range": true,
"refId": "A"
}
],
"title": "Average Builder Blob Inclusion",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "cdpm0r0qz6328a"
},
"description": "",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 8
},
"id": 4,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"maxHeight": 600,
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "cdpm0r0qz6328a"
},
"editorMode": "code",
"expr": "viable_transaction",
"instant": false,
"legendFormat": "Viable Transactions",
"range": true,
"refId": "A"
},
{
"datasource": {
"type": "prometheus",
"uid": "cdpm0r0qz6328a"
},
"editorMode": "code",
"expr": "viable_blobs",
"hide": false,
"instant": false,
"interval": "",
"legendFormat": "Viable Blobs",
"range": true,
"refId": "D"
},
{
"datasource": {
"type": "prometheus",
"uid": "cdpm0r0qz6328a"
},
"editorMode": "code",
"expr": "increase(transaction_inclusion[30s])/increase(block_number[30s])",
"hide": false,
"instant": false,
"interval": "",
"legendFormat": "Transactions Included In Block",
"range": true,
"refId": "B"
},
{
"datasource": {
"type": "prometheus",
"uid": "cdpm0r0qz6328a"
},
"editorMode": "code",
"expr": "increase(blob_inclusion[30s])/increase(block_number[30s])",
"hide": false,
"instant": false,
"interval": "",
"legendFormat": "Blobs Included In Block",
"range": true,
"refId": "C"
}
],
"title": "Blob Mempool",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "cdpm0r0qz6328a"
},
"description": "",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "bars",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 8
},
"id": 1,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"maxHeight": 600,
"mode": "single",
"sort": "none"
}
},
"pluginVersion": "11.0.0",
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "cdpm0r0qz6328a"
},
"editorMode": "code",
"exemplar": false,
"expr": "increase(transaction_inclusion_delay_sum{account=\"$Account\"}[30s])/increase(transaction_inclusion_delay_count{account=\"$Account\"}[30s])",
"format": "time_series",
"instant": false,
"interval": "",
"legendFormat": "$Account",
"range": true,
"refId": "A"
}
],
"title": "Blob Transaction Inclusion Delay",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "cdpm0r0qz6328a"
},
"description": "",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "line",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 5,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 0,
"y": 16
},
"id": 3,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"maxHeight": 600,
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "cdpm0r0qz6328a"
},
"editorMode": "code",
"expr": "blob_base_fee",
"instant": false,
"legendFormat": "Blob Base Fee",
"range": true,
"refId": "A"
}
],
"title": "Blob Base Fee",
"type": "timeseries"
},
{
"datasource": {
"type": "prometheus",
"uid": "cdpm0r0qz6328a"
},
"description": "",
"fieldConfig": {
"defaults": {
"color": {
"mode": "palette-classic"
},
"custom": {
"axisBorderShow": false,
"axisCenteredZero": false,
"axisColorMode": "text",
"axisLabel": "",
"axisPlacement": "auto",
"barAlignment": 0,
"drawStyle": "bars",
"fillOpacity": 0,
"gradientMode": "none",
"hideFrom": {
"legend": false,
"tooltip": false,
"viz": false
},
"insertNulls": false,
"lineInterpolation": "linear",
"lineWidth": 1,
"pointSize": 2,
"scaleDistribution": {
"type": "linear"
},
"showPoints": "auto",
"spanNulls": false,
"stacking": {
"group": "A",
"mode": "none"
},
"thresholdsStyle": {
"mode": "off"
}
},
"mappings": [],
"thresholds": {
"mode": "absolute",
"steps": [
{
"color": "green",
"value": null
},
{
"color": "red",
"value": 80
}
]
}
},
"overrides": []
},
"gridPos": {
"h": 8,
"w": 12,
"x": 12,
"y": 16
},
"id": 2,
"options": {
"legend": {
"calcs": [],
"displayMode": "list",
"placement": "bottom",
"showLegend": true
},
"tooltip": {
"maxHeight": 600,
"mode": "single",
"sort": "none"
}
},
"targets": [
{
"datasource": {
"type": "prometheus",
"uid": "cdpm0r0qz6328a"
},
"editorMode": "code",
"expr": "increase(transactions_observed{account=\"$Account\"}[30s])",
"instant": false,
"interval": "",
"legendFormat": "$Account-blobFee - {{maxBlobBaseFee}}",
"range": true,
"refId": "A"
}
],
"title": "Transaction Blob Fee",
"type": "timeseries"
}
],
"refresh": "",
"schemaVersion": 39,
"tags": [],
"templating": {
"list": [
{
"current": {
"selected": true,
"text": [
"Arbitrum"
],
"value": [
"Arbitrum"
]
},
"datasource": {
"type": "prometheus",
"uid": "cdpm0r0qz6328a"
},
"definition": "label_values(account)",
"hide": 0,
"includeAll": false,
"label": "",
"multi": true,
"name": "Account",
"options": [],
"query": {
"qryType": 1,
"query": "label_values(account)",
"refId": "PrometheusVariableQueryEditor-VariableQuery"
},
"refresh": 1,
"regex": "",
"skipUrlSync": false,
"sort": 0,
"type": "query"
}
]
},
"time": {
"from": "now-1h",
"to": "now"
},
"timeRangeUpdatedDuringEditOrView": false,
"timepicker": {},
"timezone": "browser",
"title": "BlobWatcher Dashboard",
"uid": "bdpm1e8v5m48we",
"version": 18,
"weekStart": ""
}

tools/blob-watcher/main.go

@@ -0,0 +1,236 @@
package main
import (
"context"
"flag"
"fmt"
"math/big"
"strings"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/ethereum/go-ethereum/consensus/misc/eip4844"
gethtypes "github.com/ethereum/go-ethereum/core/types"
"github.com/ethereum/go-ethereum/ethclient"
"github.com/ethereum/go-ethereum/ethclient/gethclient"
"github.com/ethereum/go-ethereum/params"
"github.com/ethereum/go-ethereum/rpc"
log "github.com/sirupsen/logrus"
)
var (
// Required fields
executionEndpoint = flag.String("execution-endpoint", "ws://localhost:8546", "Path to websocket endpoint for execution client.")
wsOrigin = flag.String("origin-secret", "", "Origin string for websocket connection")
metricsEndpoint = flag.String("metrics-endpoint", "localhost:8080", "Path for our metrics server.")
)
func main() {
flag.Parse()
log.Info("Starting blob watcher service")
log.Infof("Using websocket endpoint of %s", *executionEndpoint)
srv := StartMetricsServer(*metricsEndpoint)
defer func() {
if err := srv.Close(); err != nil {
log.Error(err)
}
}()
client, err := rpc.DialWebsocket(context.Background(), *executionEndpoint, *wsOrigin)
if err != nil {
log.Fatal(err)
}
ec := ethclient.NewClient(client)
gc := gethclient.New(client)
txChan := make(chan *gethtypes.Transaction, 100)
pSub, err := gc.SubscribeFullPendingTransactions(context.Background(), txChan)
if err != nil {
log.Fatal(err)
}
hdrChan := make(chan *gethtypes.Header, 100)
hSub, err := ec.SubscribeNewHead(context.Background(), hdrChan)
if err != nil {
log.Fatal(err)
}
chainID, err := ec.ChainID(context.Background())
if err != nil {
log.Fatal(err)
}
currBaseFee := new(big.Int)
pendingTxs := make(map[common.Hash]*gethtypes.Transaction)
txTime := make(map[common.Hash]time.Time)
for {
select {
case err := <-pSub.Err():
log.WithError(err).Error("Pending transaction subscription error")
ec.Close()
client.Close()
close(txChan)
close(hdrChan)
hSub.Unsubscribe()
return
case err := <-hSub.Err():
log.WithError(err).Error("New head subscription error")
ec.Close()
client.Close()
close(txChan)
close(hdrChan)
pSub.Unsubscribe()
return
case tx := <-txChan:
if tx.Type() == gethtypes.BlobTxType {
tHash := tx.Hash()
log.WithFields(txData(tx, chainID)).Infof("Received new Transaction from Gossip")
recordTxMetrics(tx, chainID)
pendingTxs[tHash] = tx
txTime[tHash] = time.Now()
}
case h := <-hdrChan:
if h.ExcessBlobGas != nil {
currBaseFee = eip4844.CalcBlobFee(*h.ExcessBlobGas)
}
log.Infof("*/-------------------------------------------------------------------------------------------------------------------------------------------------------------------*/")
log.WithFields(log.Fields{
"blockHash": h.Hash(),
"blockNumber": h.Number.Uint64(),
"blockTime": h.Time,
"blobBaseFee(wei)": currBaseFee.Uint64(),
"baseFee(Gwei)": float64(h.BaseFee.Uint64()) / params.GWei,
"builder": strings.ToValidUTF8(string(h.Extra), ""),
}).Infof("Received new block")
blockNumberGauge.Set(float64(h.Number.Uint64()))
blobBaseFeeGauge.Set(float64(currBaseFee.Uint64()))
currentPendingTxs := len(pendingTxs)
blobsIncluded := 0
viabletxs := 0
viableBlobs := 0
for hash, tx := range pendingTxs {
r, err := ec.TransactionReceipt(context.Background(), hash)
if err == nil && r.BlockHash == h.Hash() {
log.WithFields(txData(tx, chainID)).Infof("Transaction was included in block %d in %s", r.BlockNumber.Uint64(), time.Since(txTime[hash]))
recordTxInclusion(tx, chainID, time.Since(txTime[hash]))
blobsIncluded += len(tx.BlobHashes())
delete(pendingTxs, hash)
delete(txTime, hash)
continue
}
acc, err := gethtypes.Sender(gethtypes.NewCancunSigner(chainID), tx)
if err != nil {
log.WithError(err).Error("Could not get sender's account address")
continue
}
currNonce, err := ec.NonceAtHash(context.Background(), acc, h.Hash())
if err != nil {
log.WithError(err).Error("Could not get sender's account nonce")
continue
}
if tx.Nonce() < currNonce {
log.WithFields(txData(tx, chainID)).Infof("Transaction has been successfully replaced and included on chain in %s", time.Since(txTime[hash]))
delete(pendingTxs, hash)
delete(txTime, hash)
continue
}
if tx.Nonce() != currNonce {
// This is not an immediate transaction that can be included.
continue
}
if tx.BlobGasFeeCap().Cmp(currBaseFee) >= 0 {
viabletxs++
viableBlobs += len(tx.BlobHashes())
log.WithFields(txData(tx, chainID)).Infof("Transaction was still not included after %s", time.Since(txTime[hash]))
}
}
pendingTransactionGauge.Set(float64(len(pendingTxs)))
viableTransactionGauge.Set(float64(viabletxs))
viableBlobsGauge.Set(float64(viableBlobs))
transactionInclusionCounter.Add(float64(currentPendingTxs - len(pendingTxs)))
blobInclusionCounter.Add(float64(blobsIncluded))
blobInclusionBuilderCounter.WithLabelValues(strings.ToValidUTF8(string(h.Extra), "")).Add(float64(blobsIncluded))
builderCounter.WithLabelValues(strings.ToValidUTF8(string(h.Extra), "")).Add(1)
log.WithFields(log.Fields{
"previousPendingTxs": currentPendingTxs,
"currentPendingTxs": len(pendingTxs),
"viableTxs": viabletxs,
}).Infof("Post block Summary for blob transactions")
log.Infof("*/-------------------------------------------------------------------------------------------------------------------------------------------------------------------*/")
}
}
}
func txData(tx *gethtypes.Transaction, chainID *big.Int) log.Fields {
acc, err := gethtypes.Sender(gethtypes.NewCancunSigner(chainID), tx)
if err != nil {
log.WithError(err).Error("Could not get sender's account address")
return nil
}
accName := acc.String()
if name, ok := accountLabels[[20]byte(acc.Bytes())]; ok {
accName = name
}
return log.Fields{
"TxHash": tx.Hash(),
"BlobGasFeeCap(Gwei)": float64(tx.BlobGasFeeCap().Uint64()) / params.GWei,
"BlobGas": tx.BlobGas(),
"BlobCount": len(tx.BlobHashes()),
"GasFeeCap(Gwei)": float64(tx.GasFeeCap().Uint64()) / params.GWei,
"GasTipCap(Gwei)": float64(tx.GasTipCap().Uint64()) / params.GWei,
"Gas": tx.Gas(),
"Account": accName,
}
}
var accountLabels = map[[20]byte]string{
mustDecode("0xc1b634853cb333d3ad8663715b08f41a3aec47cc"): "Arbitrum",
mustDecode("0x6887246668a3b87f54deb3b94ba47a6f63f32985"): "Optimism",
mustDecode("0x5050f69a9786f081509234f1a7f4684b5e5b76c9"): "Base",
mustDecode("0x000000633b68f5d8d3a86593ebb815b4663bcbe0"): "Taiko",
mustDecode("0x2c169dfe5fbba12957bdd0ba47d9cedbfe260ca7"): "Starknet",
mustDecode("0x0D3250c3D5FAcb74Ac15834096397a3Ef790ec99"): "ZkSync",
mustDecode("0xcf2898225ed05be911d3709d9417e86e0b4cfc8f"): "Scroll",
mustDecode("0x415c8893d514f9bc5211d36eeda4183226b84aa7"): "Blast",
mustDecode("0xa9268341831efa4937537bc3e9eb36dbece83c7e"): "Linea",
}
func mustDecode(address string) [20]byte {
byteAddr := hexutil.MustDecode(address)
return [20]byte(byteAddr)
}
func recordTxMetrics(tx *gethtypes.Transaction, chainID *big.Int) {
acc, err := gethtypes.Sender(gethtypes.NewCancunSigner(chainID), tx)
if err != nil {
log.WithError(err).Error("Could not get sender's account address")
return
}
accName := acc.String()
if name, ok := accountLabels[[20]byte(acc.Bytes())]; ok {
accName = name
}
transactionsObservedGauge.WithLabelValues(accName, fmt.Sprintf("%d", len(tx.BlobHashes())), fmt.Sprintf("%d", tx.BlobGasFeeCap().Uint64())).Inc()
}
func recordTxInclusion(tx *gethtypes.Transaction, chainID *big.Int, inclusionDelay time.Duration) {
acc, err := gethtypes.Sender(gethtypes.NewCancunSigner(chainID), tx)
if err != nil {
log.WithError(err).Error("Could not get sender's account address")
return
}
accName := acc.String()
if name, ok := accountLabels[[20]byte(acc.Bytes())]; ok {
accName = name
}
transactionInclusionDelay.WithLabelValues(accName, fmt.Sprintf("%d", len(tx.BlobHashes()))).Observe(inclusionDelay.Seconds())
}


@@ -0,0 +1,80 @@
package main
import (
"net/http"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/prometheus/client_golang/prometheus/promhttp"
log "github.com/sirupsen/logrus"
)
var (
transactionsObservedGauge = promauto.NewGaugeVec(prometheus.GaugeOpts{
Name: "transactions_observed",
Help: "Count the number of blob transactions observed in your local mempool",
}, []string{"account", "blobCount", "maxBlobBaseFee"})
transactionInclusionDelay = promauto.NewHistogramVec(
prometheus.HistogramOpts{
Name: "transaction_inclusion_delay",
Help: "The number of seconds it takes to include a blob transaction on chain",
Buckets: []float64{1, 2, 16, 32, 64, 128, 256, 512, 1024},
},
[]string{"account", "blobCount"},
)
blockNumberGauge = promauto.NewGauge(prometheus.GaugeOpts{
Name: "block_number",
Help: "The current block number in your execution client",
})
blobBaseFeeGauge = promauto.NewGauge(prometheus.GaugeOpts{
Name: "blob_base_fee",
Help: "The blob base fee",
})
pendingTransactionGauge = promauto.NewGauge(prometheus.GaugeOpts{
Name: "pending_transactions",
Help: "The current number of pending transactions in the mempool",
})
viableTransactionGauge = promauto.NewGauge(prometheus.GaugeOpts{
Name: "viable_transaction",
Help: "The current number of viable transactions in the mempool",
})
viableBlobsGauge = promauto.NewGauge(prometheus.GaugeOpts{
Name: "viable_blobs",
Help: "The current number of viable blobs in the mempool",
})
transactionInclusionCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "transaction_inclusion",
Help: "The current number of transactions included in a block",
})
blobInclusionCounter = promauto.NewCounter(prometheus.CounterOpts{
Name: "blob_inclusion",
Help: "The number of blobs included on chain via a transaction",
})
blobInclusionBuilderCounter = promauto.NewCounterVec(prometheus.CounterOpts{
Name: "blob_inclusion_by_builder",
Help: "The number of blobs included on chain via a transaction by builder",
}, []string{"builder"})
builderCounter = promauto.NewCounterVec(prometheus.CounterOpts{
Name: "builder_blocks",
Help: "The number of blocks built by a builder",
}, []string{"builder"})
)
func StartMetricsServer(addr string) *http.Server {
mux := http.NewServeMux()
mux.Handle("/metrics", promhttp.HandlerFor(prometheus.DefaultGatherer, promhttp.HandlerOpts{
MaxRequestsInFlight: 5,
Timeout: 30 * time.Second,
}))
srv := &http.Server{Addr: addr, Handler: mux, ReadHeaderTimeout: time.Second}
log.WithField("address", srv.Addr).Debug("Starting prometheus server")
go func() {
err := srv.ListenAndServe()
if err != nil && err != http.ErrServerClosed {
log.WithError(err).Fatalf("Could not listen to host:port :%s", srv.Addr)
}
}()
return srv
}


@@ -60,6 +60,9 @@ def _ssz_go_proto_library_impl(ctx):
if len(ctx.attr.objs) > 0:
args.append("--objs=%s" % ",".join(ctx.attr.objs))
if len(ctx.attr.exclude_objs) > 0:
args.append("--exclude-objs=%s" % ",".join(ctx.attr.exclude_objs))
ctx.actions.run(
executable = ctx.executable.sszgen,
progress_message = "Generating ssz marshal and unmarshal functions",
@@ -79,9 +82,10 @@ ssz_gen_marshal = rule(
cfg = "exec",
),
"objs": attr.string_list(),
"exclude_objs": attr.string_list(),
"includes": attr.label_list(providers = [GoLibrary]),
"out": attr.output(),
},
outputs = {"out": "generated.ssz.go"},
)
SSZ_DEPS = ["@com_github_prysmaticlabs_fastssz//:go_default_library"]


@@ -138,6 +138,9 @@ func (v *validator) SubmitAggregateAndProof(ctx context.Context, slot primitives
// Signs input slot with domain selection proof. This is used to create the signature for aggregator selection.
func (v *validator) signSlotWithSelectionProof(ctx context.Context, pubKey [fieldparams.BLSPubkeyLength]byte, slot primitives.Slot) (signature []byte, err error) {
ctx, span := trace.StartSpan(ctx, "validator.signSlotWithSelectionProof")
defer span.End()
domain, err := v.domainData(ctx, slots.ToEpoch(slot), params.BeaconConfig().DomainSelectionProof[:])
if err != nil {
return nil, err
@@ -194,6 +197,9 @@ func (v *validator) waitToSlotTwoThirds(ctx context.Context, slot primitives.Slo
// This returns the signature of validator signing over aggregate and
// proof object.
func (v *validator) aggregateAndProofSig(ctx context.Context, pubKey [fieldparams.BLSPubkeyLength]byte, agg *ethpb.AggregateAttestationAndProof, slot primitives.Slot) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "validator.aggregateAndProofSig")
defer span.End()
d, err := v.domainData(ctx, slots.ToEpoch(agg.Aggregate.Data.Slot), params.BeaconConfig().DomainAggregateAndProof[:])
if err != nil {
return nil, err


@@ -199,6 +199,9 @@ func (v *validator) duty(pubKey [fieldparams.BLSPubkeyLength]byte) (*ethpb.Dutie
// Given validator's public key, this function returns the signature of an attestation data and its signing root.
func (v *validator) signAtt(ctx context.Context, pubKey [fieldparams.BLSPubkeyLength]byte, data *ethpb.AttestationData, slot primitives.Slot) ([]byte, [32]byte, error) {
ctx, span := trace.StartSpan(ctx, "validator.signAtt")
defer span.End()
domain, root, err := v.domainAndSigningRoot(ctx, data)
if err != nil {
return nil, [32]byte{}, err


@@ -64,6 +64,7 @@ go_library(
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
"@org_golang_google_grpc//:go_default_library",
"@org_golang_google_protobuf//types/known/timestamppb:go_default_library",
"@org_golang_x_sync//errgroup:go_default_library",


@@ -116,7 +116,7 @@ func TestActivation_Nominal(t *testing.T) {
// Get does not return any result for non existing key
jsonRestHandler.EXPECT().Post(
ctx,
gomock.Any(),
"/eth/v1/beacon/states/head/validators",
nil,
bytes.NewBuffer(reqBytes),
@@ -240,7 +240,7 @@ func TestActivation_InvalidData(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Post(
ctx,
gomock.Any(),
gomock.Any(),
gomock.Any(),
gomock.Any(),
@@ -281,7 +281,7 @@ func TestActivation_JsonResponseError(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Post(
ctx,
gomock.Any(),
gomock.Any(),
gomock.Any(),
gomock.Any(),
@@ -291,7 +291,7 @@ func TestActivation_JsonResponseError(t *testing.T) {
).Times(1)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
gomock.Any(),
gomock.Any(),
).Return(


@@ -33,7 +33,7 @@ func TestGetAttestationData_ValidAttestation(t *testing.T) {
produceAttestationDataResponseJson := structs.GetAttestationDataResponse{}
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
fmt.Sprintf("/eth/v1/validator/attestation_data?committee_index=%d&slot=%d", expectedCommitteeIndex, expectedSlot),
&produceAttestationDataResponseJson,
).Return(
@@ -183,7 +183,7 @@ func TestGetAttestationData_InvalidData(t *testing.T) {
produceAttestationDataResponseJson := structs.GetAttestationDataResponse{}
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
"/eth/v1/validator/attestation_data?committee_index=2&slot=1",
&produceAttestationDataResponseJson,
).Return(
@@ -212,7 +212,7 @@ func TestGetAttestationData_JsonResponseError(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
produceAttestationDataResponseJson := structs.GetAttestationDataResponse{}
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
fmt.Sprintf("/eth/v1/validator/attestation_data?committee_index=%d&slot=%d", committeeIndex, slot),
&produceAttestationDataResponseJson,
).Return(


@@ -58,7 +58,7 @@ func TestListValidators(t *testing.T) {
ctx := context.Background()
stateValidatorsProvider := mock.NewMockStateValidatorsProvider(ctrl)
stateValidatorsProvider.EXPECT().StateValidatorsForSlot(ctx, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(
stateValidatorsProvider.EXPECT().StateValidatorsForSlot(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(
nil,
errors.New("foo error"),
)
@@ -78,7 +78,7 @@ func TestListValidators(t *testing.T) {
ctx := context.Background()
stateValidatorsProvider := mock.NewMockStateValidatorsProvider(ctrl)
stateValidatorsProvider.EXPECT().StateValidatorsForSlot(ctx, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(
stateValidatorsProvider.EXPECT().StateValidatorsForSlot(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(
nil,
errors.New("bar error"),
)
@@ -96,7 +96,7 @@ func TestListValidators(t *testing.T) {
ctx := context.Background()
stateValidatorsProvider := mock.NewMockStateValidatorsProvider(ctrl)
stateValidatorsProvider.EXPECT().StateValidatorsForHead(ctx, gomock.Any(), gomock.Any(), gomock.Any()).Return(
stateValidatorsProvider.EXPECT().StateValidatorsForHead(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(
nil,
errors.New("foo error"),
)
@@ -114,13 +114,13 @@ func TestListValidators(t *testing.T) {
ctx := context.Background()
stateValidatorsProvider := mock.NewMockStateValidatorsProvider(ctrl)
stateValidatorsProvider.EXPECT().StateValidatorsForHead(ctx, gomock.Any(), gomock.Any(), gomock.Any()).Return(
stateValidatorsProvider.EXPECT().StateValidatorsForHead(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(
nil,
nil,
)
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Get(ctx, blockHeaderEndpoint, gomock.Any()).Return(errors.New("bar error"))
jsonRestHandler.EXPECT().Get(gomock.Any(), blockHeaderEndpoint, gomock.Any()).Return(errors.New("bar error"))
beaconChainClient := beaconApiChainClient{
stateValidatorsProvider: stateValidatorsProvider,
@@ -187,13 +187,13 @@ func TestListValidators(t *testing.T) {
ctx := context.Background()
stateValidatorsProvider := mock.NewMockStateValidatorsProvider(ctrl)
stateValidatorsProvider.EXPECT().StateValidatorsForHead(ctx, gomock.Any(), gomock.Any(), gomock.Any()).Return(
stateValidatorsProvider.EXPECT().StateValidatorsForHead(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(
nil,
nil,
)
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Get(ctx, blockHeaderEndpoint, gomock.Any()).Return(
jsonRestHandler.EXPECT().Get(gomock.Any(), blockHeaderEndpoint, gomock.Any()).Return(
nil,
).SetArg(
2,
@@ -328,7 +328,7 @@ func TestListValidators(t *testing.T) {
ctx := context.Background()
stateValidatorsProvider := mock.NewMockStateValidatorsProvider(ctrl)
stateValidatorsProvider.EXPECT().StateValidatorsForSlot(ctx, gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(
stateValidatorsProvider.EXPECT().StateValidatorsForSlot(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(
testCase.generateStateValidatorsResponse(),
nil,
)
@@ -556,7 +556,7 @@ func TestListValidators(t *testing.T) {
ctx := context.Background()
stateValidatorsProvider := mock.NewMockStateValidatorsProvider(ctrl)
stateValidatorsProvider.EXPECT().StateValidatorsForSlot(ctx, primitives.Slot(0), make([]string, 0), []primitives.ValidatorIndex{}, nil).Return(
stateValidatorsProvider.EXPECT().StateValidatorsForSlot(gomock.Any(), primitives.Slot(0), make([]string, 0), []primitives.ValidatorIndex{}, nil).Return(
testCase.generateJsonStateValidatorsResponse(),
nil,
)
@@ -745,7 +745,7 @@ func TestGetChainHead(t *testing.T) {
finalityCheckpointsResponse := structs.GetFinalityCheckpointsResponse{}
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Get(ctx, finalityCheckpointsEndpoint, &finalityCheckpointsResponse).Return(
jsonRestHandler.EXPECT().Get(gomock.Any(), finalityCheckpointsEndpoint, &finalityCheckpointsResponse).Return(
testCase.finalityCheckpointsError,
).SetArg(
2,
@@ -844,7 +844,7 @@ func TestGetChainHead(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
finalityCheckpointsResponse := structs.GetFinalityCheckpointsResponse{}
jsonRestHandler.EXPECT().Get(ctx, finalityCheckpointsEndpoint, &finalityCheckpointsResponse).Return(
jsonRestHandler.EXPECT().Get(gomock.Any(), finalityCheckpointsEndpoint, &finalityCheckpointsResponse).Return(
nil,
).SetArg(
2,
@@ -852,7 +852,7 @@ func TestGetChainHead(t *testing.T) {
)
headBlockHeadersResponse := structs.GetBlockHeaderResponse{}
jsonRestHandler.EXPECT().Get(ctx, headBlockHeadersEndpoint, &headBlockHeadersResponse).Return(
jsonRestHandler.EXPECT().Get(gomock.Any(), headBlockHeadersEndpoint, &headBlockHeadersResponse).Return(
testCase.headBlockHeadersError,
).SetArg(
2,
@@ -874,7 +874,7 @@ func TestGetChainHead(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
finalityCheckpointsResponse := structs.GetFinalityCheckpointsResponse{}
jsonRestHandler.EXPECT().Get(ctx, finalityCheckpointsEndpoint, &finalityCheckpointsResponse).Return(
jsonRestHandler.EXPECT().Get(gomock.Any(), finalityCheckpointsEndpoint, &finalityCheckpointsResponse).Return(
nil,
).SetArg(
2,
@@ -882,7 +882,7 @@ func TestGetChainHead(t *testing.T) {
)
headBlockHeadersResponse := structs.GetBlockHeaderResponse{}
jsonRestHandler.EXPECT().Get(ctx, headBlockHeadersEndpoint, &headBlockHeadersResponse).Return(
jsonRestHandler.EXPECT().Get(gomock.Any(), headBlockHeadersEndpoint, &headBlockHeadersResponse).Return(
nil,
).SetArg(
2,
@@ -940,7 +940,7 @@ func Test_beaconApiBeaconChainClient_GetValidatorPerformance(t *testing.T) {
want := &ethpb.ValidatorPerformanceResponse{}
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Post(
ctx,
gomock.Any(),
getValidatorPerformanceEndpoint,
nil,
bytes.NewBuffer(request),


@@ -109,7 +109,7 @@ func TestGetFork_Nominal(t *testing.T) {
ctx := context.Background()
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
forkEndpoint,
&stateForkResponseJson,
).Return(
@@ -137,7 +137,7 @@ func TestGetFork_Invalid(t *testing.T) {
ctx := context.Background()
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
forkEndpoint,
gomock.Any(),
).Return(
@@ -176,7 +176,7 @@ func TestGetHeaders_Nominal(t *testing.T) {
ctx := context.Background()
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
headersEndpoint,
&blockHeadersResponseJson,
).Return(
@@ -204,7 +204,7 @@ func TestGetHeaders_Invalid(t *testing.T) {
ctx := context.Background()
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
headersEndpoint,
gomock.Any(),
).Return(
@@ -248,7 +248,7 @@ func TestGetLiveness_Nominal(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Post(
ctx,
gomock.Any(),
livenessEndpoint,
nil,
bytes.NewBuffer(marshalledIndexes),
@@ -275,7 +275,7 @@ func TestGetLiveness_Invalid(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Post(
ctx,
gomock.Any(),
livenessEndpoint,
nil,
gomock.Any(),
@@ -324,7 +324,7 @@ func TestGetIsSyncing_Nominal(t *testing.T) {
ctx := context.Background()
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
syncingEndpoint,
&syncingResponseJson,
).Return(
@@ -355,7 +355,7 @@ func TestGetIsSyncing_Invalid(t *testing.T) {
ctx := context.Background()
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
syncingEndpoint,
&syncingResponseJson,
).Return(


@@ -113,7 +113,7 @@ func TestGetGenesis(t *testing.T) {
genesisProvider := mock.NewMockGenesisProvider(ctrl)
genesisProvider.EXPECT().Genesis(
ctx,
gomock.Any(),
).Return(
testCase.genesisResponse,
testCase.genesisError,
@@ -124,7 +124,7 @@ func TestGetGenesis(t *testing.T) {
if testCase.queriesDepositContract {
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
"/eth/v1/config/deposit_contract",
&depositContractJson,
).Return(
@@ -203,7 +203,7 @@ func TestGetSyncStatus(t *testing.T) {
syncingResponse := structs.SyncStatusResponse{}
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
syncingEndpoint,
&syncingResponse,
).Return(
@@ -267,7 +267,7 @@ func TestGetVersion(t *testing.T) {
var versionResponse structs.GetVersionResponse
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
versionEndpoint,
&versionResponse,
).Return(


@@ -13,6 +13,7 @@ import (
"github.com/prysmaticlabs/prysm/v5/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v5/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v5/validator/client/iface"
"go.opencensus.io/trace"
)
type ValidatorClientOpt func(*beaconApiValidatorClient)
@@ -47,12 +48,16 @@ func NewBeaconApiValidatorClient(jsonRestHandler JsonRestHandler, opts ...Valida
}
func (c *beaconApiValidatorClient) Duties(ctx context.Context, in *ethpb.DutiesRequest) (*ethpb.DutiesResponse, error) {
ctx, span := trace.StartSpan(ctx, "beacon-api.Duties")
defer span.End()
return wrapInMetrics[*ethpb.DutiesResponse]("Duties", func() (*ethpb.DutiesResponse, error) {
return c.duties(ctx, in)
})
}
func (c *beaconApiValidatorClient) CheckDoppelGanger(ctx context.Context, in *ethpb.DoppelGangerRequest) (*ethpb.DoppelGangerResponse, error) {
ctx, span := trace.StartSpan(ctx, "beacon-api.CheckDoppelGanger")
defer span.End()
return wrapInMetrics[*ethpb.DoppelGangerResponse]("CheckDoppelGanger", func() (*ethpb.DoppelGangerResponse, error) {
return c.checkDoppelGanger(ctx, in)
})
@@ -62,6 +67,10 @@ func (c *beaconApiValidatorClient) DomainData(ctx context.Context, in *ethpb.Dom
if len(in.Domain) != 4 {
return nil, errors.Errorf("invalid domain type: %s", hexutil.Encode(in.Domain))
}
ctx, span := trace.StartSpan(ctx, "beacon-api.DomainData")
defer span.End()
domainType := bytesutil.ToBytes4(in.Domain)
return wrapInMetrics[*ethpb.DomainResponse]("DomainData", func() (*ethpb.DomainResponse, error) {
@@ -70,12 +79,18 @@ func (c *beaconApiValidatorClient) DomainData(ctx context.Context, in *ethpb.Dom
}
func (c *beaconApiValidatorClient) AttestationData(ctx context.Context, in *ethpb.AttestationDataRequest) (*ethpb.AttestationData, error) {
ctx, span := trace.StartSpan(ctx, "beacon-api.AttestationData")
defer span.End()
return wrapInMetrics[*ethpb.AttestationData]("AttestationData", func() (*ethpb.AttestationData, error) {
return c.attestationData(ctx, in.Slot, in.CommitteeIndex)
})
}
func (c *beaconApiValidatorClient) BeaconBlock(ctx context.Context, in *ethpb.BlockRequest) (*ethpb.GenericBeaconBlock, error) {
ctx, span := trace.StartSpan(ctx, "beacon-api.BeaconBlock")
defer span.End()
return wrapInMetrics[*ethpb.GenericBeaconBlock]("BeaconBlock", func() (*ethpb.GenericBeaconBlock, error) {
return c.beaconBlock(ctx, in.Slot, in.RandaoReveal, in.Graffiti)
})
@@ -86,48 +101,72 @@ func (c *beaconApiValidatorClient) FeeRecipientByPubKey(_ context.Context, _ *et
}
func (c *beaconApiValidatorClient) SyncCommitteeContribution(ctx context.Context, in *ethpb.SyncCommitteeContributionRequest) (*ethpb.SyncCommitteeContribution, error) {
ctx, span := trace.StartSpan(ctx, "beacon-api.SyncCommitteeContribution")
defer span.End()
return wrapInMetrics[*ethpb.SyncCommitteeContribution]("SyncCommitteeContribution", func() (*ethpb.SyncCommitteeContribution, error) {
return c.syncCommitteeContribution(ctx, in)
})
}
func (c *beaconApiValidatorClient) SyncMessageBlockRoot(ctx context.Context, _ *empty.Empty) (*ethpb.SyncMessageBlockRootResponse, error) {
ctx, span := trace.StartSpan(ctx, "beacon-api.SyncMessageBlockRoot")
defer span.End()
return wrapInMetrics[*ethpb.SyncMessageBlockRootResponse]("SyncMessageBlockRoot", func() (*ethpb.SyncMessageBlockRootResponse, error) {
return c.syncMessageBlockRoot(ctx)
})
}
func (c *beaconApiValidatorClient) SyncSubcommitteeIndex(ctx context.Context, in *ethpb.SyncSubcommitteeIndexRequest) (*ethpb.SyncSubcommitteeIndexResponse, error) {
ctx, span := trace.StartSpan(ctx, "beacon-api.SyncSubcommitteeIndex")
defer span.End()
return wrapInMetrics[*ethpb.SyncSubcommitteeIndexResponse]("SyncSubcommitteeIndex", func() (*ethpb.SyncSubcommitteeIndexResponse, error) {
return c.syncSubcommitteeIndex(ctx, in)
})
}
func (c *beaconApiValidatorClient) MultipleValidatorStatus(ctx context.Context, in *ethpb.MultipleValidatorStatusRequest) (*ethpb.MultipleValidatorStatusResponse, error) {
ctx, span := trace.StartSpan(ctx, "beacon-api.MultipleValidatorStatus")
defer span.End()
return wrapInMetrics[*ethpb.MultipleValidatorStatusResponse]("MultipleValidatorStatus", func() (*ethpb.MultipleValidatorStatusResponse, error) {
return c.multipleValidatorStatus(ctx, in)
})
}
func (c *beaconApiValidatorClient) PrepareBeaconProposer(ctx context.Context, in *ethpb.PrepareBeaconProposerRequest) (*empty.Empty, error) {
ctx, span := trace.StartSpan(ctx, "beacon-api.PrepareBeaconProposer")
defer span.End()
return wrapInMetrics[*empty.Empty]("PrepareBeaconProposer", func() (*empty.Empty, error) {
return new(empty.Empty), c.prepareBeaconProposer(ctx, in.Recipients)
})
}
func (c *beaconApiValidatorClient) ProposeAttestation(ctx context.Context, in *ethpb.Attestation) (*ethpb.AttestResponse, error) {
ctx, span := trace.StartSpan(ctx, "beacon-api.ProposeAttestation")
defer span.End()
return wrapInMetrics[*ethpb.AttestResponse]("ProposeAttestation", func() (*ethpb.AttestResponse, error) {
return c.proposeAttestation(ctx, in)
})
}
func (c *beaconApiValidatorClient) ProposeBeaconBlock(ctx context.Context, in *ethpb.GenericSignedBeaconBlock) (*ethpb.ProposeResponse, error) {
ctx, span := trace.StartSpan(ctx, "beacon-api.ProposeBeaconBlock")
defer span.End()
return wrapInMetrics[*ethpb.ProposeResponse]("ProposeBeaconBlock", func() (*ethpb.ProposeResponse, error) {
return c.proposeBeaconBlock(ctx, in)
})
}
func (c *beaconApiValidatorClient) ProposeExit(ctx context.Context, in *ethpb.SignedVoluntaryExit) (*ethpb.ProposeExitResponse, error) {
ctx, span := trace.StartSpan(ctx, "beacon-api.ProposeExit")
defer span.End()
return wrapInMetrics[*ethpb.ProposeExitResponse]("ProposeExit", func() (*ethpb.ProposeExitResponse, error) {
return c.proposeExit(ctx, in)
})
@@ -138,52 +177,79 @@ func (c *beaconApiValidatorClient) StreamBlocksAltair(ctx context.Context, in *e
}
func (c *beaconApiValidatorClient) SubmitAggregateSelectionProof(ctx context.Context, in *ethpb.AggregateSelectionRequest, index primitives.ValidatorIndex, committeeLength uint64) (*ethpb.AggregateSelectionResponse, error) {
ctx, span := trace.StartSpan(ctx, "beacon-api.SubmitAggregateSelectionProof")
defer span.End()
return wrapInMetrics[*ethpb.AggregateSelectionResponse]("SubmitAggregateSelectionProof", func() (*ethpb.AggregateSelectionResponse, error) {
return c.submitAggregateSelectionProof(ctx, in, index, committeeLength)
})
}
func (c *beaconApiValidatorClient) SubmitSignedAggregateSelectionProof(ctx context.Context, in *ethpb.SignedAggregateSubmitRequest) (*ethpb.SignedAggregateSubmitResponse, error) {
ctx, span := trace.StartSpan(ctx, "beacon-api.SubmitSignedAggregateSelectionProof")
defer span.End()
return wrapInMetrics[*ethpb.SignedAggregateSubmitResponse]("SubmitSignedAggregateSelectionProof", func() (*ethpb.SignedAggregateSubmitResponse, error) {
return c.submitSignedAggregateSelectionProof(ctx, in)
})
}
func (c *beaconApiValidatorClient) SubmitSignedContributionAndProof(ctx context.Context, in *ethpb.SignedContributionAndProof) (*empty.Empty, error) {
ctx, span := trace.StartSpan(ctx, "beacon-api.SubmitSignedContributionAndProof")
defer span.End()
return wrapInMetrics[*empty.Empty]("SubmitSignedContributionAndProof", func() (*empty.Empty, error) {
return new(empty.Empty), c.submitSignedContributionAndProof(ctx, in)
})
}
func (c *beaconApiValidatorClient) SubmitSyncMessage(ctx context.Context, in *ethpb.SyncCommitteeMessage) (*empty.Empty, error) {
ctx, span := trace.StartSpan(ctx, "beacon-api.SubmitSyncMessage")
defer span.End()
return wrapInMetrics[*empty.Empty]("SubmitSyncMessage", func() (*empty.Empty, error) {
return new(empty.Empty), c.submitSyncMessage(ctx, in)
})
}
func (c *beaconApiValidatorClient) SubmitValidatorRegistrations(ctx context.Context, in *ethpb.SignedValidatorRegistrationsV1) (*empty.Empty, error) {
ctx, span := trace.StartSpan(ctx, "beacon-api.SubmitValidatorRegistrations")
defer span.End()
return wrapInMetrics[*empty.Empty]("SubmitValidatorRegistrations", func() (*empty.Empty, error) {
return new(empty.Empty), c.submitValidatorRegistrations(ctx, in.Messages)
})
}
func (c *beaconApiValidatorClient) SubscribeCommitteeSubnets(ctx context.Context, in *ethpb.CommitteeSubnetsSubscribeRequest, duties []*ethpb.DutiesResponse_Duty) (*empty.Empty, error) {
ctx, span := trace.StartSpan(ctx, "beacon-api.SubscribeCommitteeSubnets")
defer span.End()
return wrapInMetrics[*empty.Empty]("SubscribeCommitteeSubnets", func() (*empty.Empty, error) {
return new(empty.Empty), c.subscribeCommitteeSubnets(ctx, in, duties)
})
}
func (c *beaconApiValidatorClient) ValidatorIndex(ctx context.Context, in *ethpb.ValidatorIndexRequest) (*ethpb.ValidatorIndexResponse, error) {
ctx, span := trace.StartSpan(ctx, "beacon-api.ValidatorIndex")
defer span.End()
return wrapInMetrics[*ethpb.ValidatorIndexResponse]("ValidatorIndex", func() (*ethpb.ValidatorIndexResponse, error) {
return c.validatorIndex(ctx, in)
})
}
func (c *beaconApiValidatorClient) ValidatorStatus(ctx context.Context, in *ethpb.ValidatorStatusRequest) (*ethpb.ValidatorStatusResponse, error) {
ctx, span := trace.StartSpan(ctx, "beacon-api.ValidatorStatus")
defer span.End()
return c.validatorStatus(ctx, in)
}
func (c *beaconApiValidatorClient) WaitForActivation(ctx context.Context, in *ethpb.ValidatorActivationRequest) (ethpb.BeaconNodeValidator_WaitForActivationClient, error) {
ctx, span := trace.StartSpan(ctx, "beacon-api.WaitForActivation")
defer span.End()
return c.waitForActivation(ctx, in)
}
@@ -212,11 +278,21 @@ func (c *beaconApiValidatorClient) EventStreamIsRunning() bool {
}
func (c *beaconApiValidatorClient) AggregatedSelections(ctx context.Context, selections []iface.BeaconCommitteeSelection) ([]iface.BeaconCommitteeSelection, error) {
return c.aggregatedSelection(ctx, selections)
ctx, span := trace.StartSpan(ctx, "beacon-api.AggregatedSelections")
defer span.End()
return wrapInMetrics[[]iface.BeaconCommitteeSelection]("AggregatedSelections", func() ([]iface.BeaconCommitteeSelection, error) {
return c.aggregatedSelection(ctx, selections)
})
}
func (c *beaconApiValidatorClient) AggregatedSyncSelections(ctx context.Context, selections []iface.SyncCommitteeSelection) ([]iface.SyncCommitteeSelection, error) {
return c.aggregatedSyncSelections(ctx, selections)
ctx, span := trace.StartSpan(ctx, "beacon-api.AggregatedSyncSelections")
defer span.End()
return wrapInMetrics[[]iface.SyncCommitteeSelection]("AggregatedSyncSelections", func() ([]iface.SyncCommitteeSelection, error) {
return c.aggregatedSyncSelections(ctx, selections)
})
}
func wrapInMetrics[Resp any](action string, f func() (Resp, error)) (Resp, error) {


@@ -32,7 +32,7 @@ func TestBeaconApiValidatorClient_GetAttestationDataValid(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
produceAttestationDataResponseJson := structs.GetAttestationDataResponse{}
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
fmt.Sprintf("/eth/v1/validator/attestation_data?committee_index=%d&slot=%d", committeeIndex, slot),
&produceAttestationDataResponseJson,
).Return(
@@ -66,7 +66,7 @@ func TestBeaconApiValidatorClient_GetAttestationDataError(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
produceAttestationDataResponseJson := structs.GetAttestationDataResponse{}
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
fmt.Sprintf("/eth/v1/validator/attestation_data?committee_index=%d&slot=%d", committeeIndex, slot),
&produceAttestationDataResponseJson,
).Return(
@@ -109,7 +109,7 @@ func TestBeaconApiValidatorClient_DomainDataValid(t *testing.T) {
ctx := context.Background()
genesisProvider := mock.NewMockGenesisProvider(ctrl)
genesisProvider.EXPECT().Genesis(ctx).Return(
genesisProvider.EXPECT().Genesis(gomock.Any()).Return(
&structs.Genesis{GenesisValidatorsRoot: genesisValidatorRoot},
nil,
).Times(2)
@@ -139,7 +139,7 @@ func TestBeaconApiValidatorClient_ProposeBeaconBlockValid(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Post(
ctx,
gomock.Any(),
"/eth/v1/beacon/blocks",
map[string]string{"Eth-Consensus-Version": "phase0"},
gomock.Any(),
@@ -175,7 +175,7 @@ func TestBeaconApiValidatorClient_ProposeBeaconBlockError(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Post(
ctx,
gomock.Any(),
"/eth/v1/beacon/blocks",
map[string]string{"Eth-Consensus-Version": "phase0"},
gomock.Any(),


@@ -98,7 +98,7 @@ func TestGetAggregatedSelections(t *testing.T) {
ctx := context.Background()
jsonRestHandler.EXPECT().Post(
ctx,
gomock.Any(),
"/eth/v1/validator/beacon_committee_selections",
nil,
bytes.NewBuffer(reqBody),


@@ -37,7 +37,7 @@ func TestGetDomainData_ValidDomainData(t *testing.T) {
// Make sure that Genesis() is called exactly once
genesisProvider := mock.NewMockGenesisProvider(ctrl)
genesisProvider.EXPECT().Genesis(ctx).Return(
genesisProvider.EXPECT().Genesis(gomock.Any()).Return(
&structs.Genesis{GenesisValidatorsRoot: genesisValidatorRoot},
nil,
).Times(1)
@@ -66,7 +66,7 @@ func TestGetDomainData_GenesisError(t *testing.T) {
// Make sure that Genesis() is called exactly once
genesisProvider := mock.NewMockGenesisProvider(ctrl)
genesisProvider.EXPECT().Genesis(ctx).Return(nil, errors.New("foo error")).Times(1)
genesisProvider.EXPECT().Genesis(gomock.Any()).Return(nil, errors.New("foo error")).Times(1)
validatorClient := &beaconApiValidatorClient{genesisProvider: genesisProvider}
_, err := validatorClient.domainData(ctx, epoch, domainType)
@@ -85,7 +85,7 @@ func TestGetDomainData_InvalidGenesisRoot(t *testing.T) {
// Make sure that Genesis() is called exactly once
genesisProvider := mock.NewMockGenesisProvider(ctrl)
genesisProvider.EXPECT().Genesis(ctx).Return(
genesisProvider.EXPECT().Genesis(gomock.Any()).Return(
&structs.Genesis{GenesisValidatorsRoot: "foo"},
nil,
).Times(1)


@@ -291,13 +291,11 @@ func TestCheckDoppelGanger_Nominal(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
ctx := context.Background()
if testCase.getSyncingOutput != nil {
syncingResponseJson := structs.SyncStatusResponse{}
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
syncingEndpoint,
&syncingResponseJson,
).Return(
@@ -312,7 +310,7 @@ func TestCheckDoppelGanger_Nominal(t *testing.T) {
stateForkResponseJson := structs.GetStateForkResponse{}
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
forkEndpoint,
&stateForkResponseJson,
).Return(
@@ -327,7 +325,7 @@ func TestCheckDoppelGanger_Nominal(t *testing.T) {
blockHeadersResponseJson := structs.GetBlockHeadersResponse{}
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
headersEndpoint,
&blockHeadersResponseJson,
).Return(
@@ -346,7 +344,7 @@ func TestCheckDoppelGanger_Nominal(t *testing.T) {
require.NoError(t, err)
jsonRestHandler.EXPECT().Post(
ctx,
gomock.Any(),
iface.inputUrl,
nil,
bytes.NewBuffer(marshalledIndexes),
@@ -364,7 +362,7 @@ func TestCheckDoppelGanger_Nominal(t *testing.T) {
if testCase.getStateValidatorsInterface != nil {
stateValidatorsProvider.EXPECT().StateValidators(
ctx,
gomock.Any(),
testCase.getStateValidatorsInterface.input,
nil,
nil,
@@ -727,13 +725,11 @@ func TestCheckDoppelGanger_Errors(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
ctx := context.Background()
if testCase.getSyncingOutput != nil {
syncingResponseJson := structs.SyncStatusResponse{}
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
syncingEndpoint,
&syncingResponseJson,
).Return(
@@ -748,7 +744,7 @@ func TestCheckDoppelGanger_Errors(t *testing.T) {
stateForkResponseJson := structs.GetStateForkResponse{}
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
forkEndpoint,
&stateForkResponseJson,
).Return(
@@ -763,7 +759,7 @@ func TestCheckDoppelGanger_Errors(t *testing.T) {
blockHeadersResponseJson := structs.GetBlockHeadersResponse{}
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
headersEndpoint,
&blockHeadersResponseJson,
).Return(
@@ -778,7 +774,7 @@ func TestCheckDoppelGanger_Errors(t *testing.T) {
if testCase.getStateValidatorsInterface != nil {
stateValidatorsProvider.EXPECT().StateValidators(
ctx,
gomock.Any(),
testCase.getStateValidatorsInterface.input,
nil,
nil,
@@ -796,7 +792,7 @@ func TestCheckDoppelGanger_Errors(t *testing.T) {
require.NoError(t, err)
jsonRestHandler.EXPECT().Post(
ctx,
gomock.Any(),
iface.inputUrl,
nil,
bytes.NewBuffer(marshalledIndexes),


@@ -63,7 +63,7 @@ func TestGetAttesterDuties_Valid(t *testing.T) {
validatorIndices := []primitives.ValidatorIndex{2, 9}
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Post(
ctx,
gomock.Any(),
fmt.Sprintf("%s/%d", getAttesterDutiesTestEndpoint, epoch),
nil,
bytes.NewBuffer(validatorIndicesBytes),
@@ -91,7 +91,7 @@ func TestGetAttesterDuties_HttpError(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Post(
ctx,
gomock.Any(),
fmt.Sprintf("%s/%d", getAttesterDutiesTestEndpoint, epoch),
gomock.Any(),
gomock.Any(),
@@ -115,7 +115,7 @@ func TestGetAttesterDuties_NilAttesterDuty(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Post(
ctx,
gomock.Any(),
fmt.Sprintf("%s/%d", getAttesterDutiesTestEndpoint, epoch),
gomock.Any(),
gomock.Any(),
@@ -159,7 +159,7 @@ func TestGetProposerDuties_Valid(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
fmt.Sprintf("%s/%d", getProposerDutiesTestEndpoint, epoch),
&structs.GetProposerDutiesResponse{},
).Return(
@@ -185,7 +185,7 @@ func TestGetProposerDuties_HttpError(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
fmt.Sprintf("%s/%d", getProposerDutiesTestEndpoint, epoch),
gomock.Any(),
).Return(
@@ -207,7 +207,7 @@ func TestGetProposerDuties_NilData(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
fmt.Sprintf("%s/%d", getProposerDutiesTestEndpoint, epoch),
gomock.Any(),
).Return(
@@ -234,7 +234,7 @@ func TestGetProposerDuties_NilProposerDuty(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
fmt.Sprintf("%s/%d", getProposerDutiesTestEndpoint, epoch),
gomock.Any(),
).Return(
@@ -287,7 +287,7 @@ func TestGetSyncDuties_Valid(t *testing.T) {
validatorIndices := []primitives.ValidatorIndex{2, 6}
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Post(
ctx,
gomock.Any(),
fmt.Sprintf("%s/%d", getSyncDutiesTestEndpoint, epoch),
nil,
bytes.NewBuffer(validatorIndicesBytes),
@@ -315,7 +315,7 @@ func TestGetSyncDuties_HttpError(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Post(
ctx,
gomock.Any(),
fmt.Sprintf("%s/%d", getSyncDutiesTestEndpoint, epoch),
gomock.Any(),
gomock.Any(),
@@ -339,7 +339,7 @@ func TestGetSyncDuties_NilData(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Post(
ctx,
gomock.Any(),
fmt.Sprintf("%s/%d", getSyncDutiesTestEndpoint, epoch),
gomock.Any(),
gomock.Any(),
@@ -368,7 +368,7 @@ func TestGetSyncDuties_NilSyncDuty(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Post(
ctx,
gomock.Any(),
fmt.Sprintf("%s/%d", getSyncDutiesTestEndpoint, epoch),
gomock.Any(),
gomock.Any(),
@@ -418,7 +418,7 @@ func TestGetCommittees_Valid(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
fmt.Sprintf("%s?epoch=%d", getCommitteesTestEndpoint, epoch),
&structs.GetCommitteesResponse{},
).Return(
@@ -444,7 +444,7 @@ func TestGetCommittees_HttpError(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
fmt.Sprintf("%s?epoch=%d", getCommitteesTestEndpoint, epoch),
gomock.Any(),
).Return(
@@ -466,7 +466,7 @@ func TestGetCommittees_NilData(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
fmt.Sprintf("%s?epoch=%d", getCommitteesTestEndpoint, epoch),
gomock.Any(),
).Return(
@@ -493,7 +493,7 @@ func TestGetCommittees_NilCommittee(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
fmt.Sprintf("%s?epoch=%d", getCommitteesTestEndpoint, epoch),
gomock.Any(),
).Return(
@@ -1079,7 +1079,7 @@ func TestGetDuties_Valid(t *testing.T) {
stateValidatorsProvider := mock.NewMockStateValidatorsProvider(ctrl)
stateValidatorsProvider.EXPECT().StateValidators(
ctx,
gomock.Any(),
gomock.Any(),
gomock.Any(),
gomock.Any(),
@@ -1233,7 +1233,7 @@ func TestGetDuties_GetStateValidatorsFailed(t *testing.T) {
stateValidatorsProvider := mock.NewMockStateValidatorsProvider(ctrl)
stateValidatorsProvider.EXPECT().StateValidators(
ctx,
gomock.Any(),
gomock.Any(),
gomock.Any(),
gomock.Any(),
@@ -1263,7 +1263,7 @@ func TestGetDuties_GetDutiesForEpochFailed(t *testing.T) {
stateValidatorsProvider := mock.NewMockStateValidatorsProvider(ctrl)
stateValidatorsProvider.EXPECT().StateValidators(
ctx,
gomock.Any(),
gomock.Any(),
gomock.Any(),
gomock.Any(),

View File

@@ -21,7 +21,7 @@ func TestGetGenesis_ValidGenesis(t *testing.T) {
genesisResponseJson := structs.GetGenesisResponse{}
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
"/eth/v1/beacon/genesis",
&genesisResponseJson,
).Return(
@@ -53,7 +53,7 @@ func TestGetGenesis_NilData(t *testing.T) {
genesisResponseJson := structs.GetGenesisResponse{}
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
"/eth/v1/beacon/genesis",
&genesisResponseJson,
).Return(
@@ -77,7 +77,7 @@ func TestGetGenesis_EndpointCalledOnlyOnce(t *testing.T) {
genesisResponseJson := structs.GetGenesisResponse{}
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
"/eth/v1/beacon/genesis",
&genesisResponseJson,
).Return(
@@ -111,14 +111,14 @@ func TestGetGenesis_EndpointCanBeCalledAgainAfterError(t *testing.T) {
genesisResponseJson := structs.GetGenesisResponse{}
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
"/eth/v1/beacon/genesis",
&genesisResponseJson,
).Return(
errors.New("foo"),
).Times(1)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
"/eth/v1/beacon/genesis",
&genesisResponseJson,
).Return(

View File

@@ -28,7 +28,7 @@ func TestGetBeaconBlock_RequestFailed(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
gomock.Any(),
gomock.Any(),
).Return(
@@ -125,7 +125,7 @@ func TestGetBeaconBlock_Error(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
gomock.Any(),
&structs.ProduceBlockV3Response{},
).SetArg(
@@ -161,7 +161,7 @@ func TestGetBeaconBlock_Phase0Valid(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
fmt.Sprintf("/eth/v3/validator/blocks/%d?graffiti=%s&randao_reveal=%s", slot, hexutil.Encode(graffiti), hexutil.Encode(randaoReveal)),
&structs.ProduceBlockV3Response{},
).SetArg(
@@ -204,7 +204,7 @@ func TestGetBeaconBlock_AltairValid(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
fmt.Sprintf("/eth/v3/validator/blocks/%d?graffiti=%s&randao_reveal=%s", slot, hexutil.Encode(graffiti), hexutil.Encode(randaoReveal)),
&structs.ProduceBlockV3Response{},
).SetArg(
@@ -247,7 +247,7 @@ func TestGetBeaconBlock_BellatrixValid(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
fmt.Sprintf("/eth/v3/validator/blocks/%d?graffiti=%s&randao_reveal=%s", slot, hexutil.Encode(graffiti), hexutil.Encode(randaoReveal)),
&structs.ProduceBlockV3Response{},
).SetArg(
@@ -292,7 +292,7 @@ func TestGetBeaconBlock_BlindedBellatrixValid(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
fmt.Sprintf("/eth/v3/validator/blocks/%d?graffiti=%s&randao_reveal=%s", slot, hexutil.Encode(graffiti), hexutil.Encode(randaoReveal)),
&structs.ProduceBlockV3Response{},
).SetArg(
@@ -337,7 +337,7 @@ func TestGetBeaconBlock_CapellaValid(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
fmt.Sprintf("/eth/v3/validator/blocks/%d?graffiti=%s&randao_reveal=%s", slot, hexutil.Encode(graffiti), hexutil.Encode(randaoReveal)),
&structs.ProduceBlockV3Response{},
).SetArg(
@@ -382,7 +382,7 @@ func TestGetBeaconBlock_BlindedCapellaValid(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
fmt.Sprintf("/eth/v3/validator/blocks/%d?graffiti=%s&randao_reveal=%s", slot, hexutil.Encode(graffiti), hexutil.Encode(randaoReveal)),
&structs.ProduceBlockV3Response{},
).SetArg(
@@ -427,7 +427,7 @@ func TestGetBeaconBlock_DenebValid(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
fmt.Sprintf("/eth/v3/validator/blocks/%d?graffiti=%s&randao_reveal=%s", slot, hexutil.Encode(graffiti), hexutil.Encode(randaoReveal)),
&structs.ProduceBlockV3Response{},
).SetArg(
@@ -472,7 +472,7 @@ func TestGetBeaconBlock_BlindedDenebValid(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
fmt.Sprintf("/eth/v3/validator/blocks/%d?graffiti=%s&randao_reveal=%s", slot, hexutil.Encode(graffiti), hexutil.Encode(randaoReveal)),
&structs.ProduceBlockV3Response{},
).SetArg(
@@ -517,14 +517,14 @@ func TestGetBeaconBlock_FallbackToBlindedBlock(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
fmt.Sprintf("/eth/v3/validator/blocks/%d?graffiti=%s&randao_reveal=%s", slot, hexutil.Encode(graffiti), hexutil.Encode(randaoReveal)),
&structs.ProduceBlockV3Response{},
).Return(
&httputil.DefaultJsonError{Code: http.StatusNotFound},
).Times(1)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
fmt.Sprintf("/eth/v1/validator/blinded_blocks/%d?graffiti=%s&randao_reveal=%s", slot, hexutil.Encode(graffiti), hexutil.Encode(randaoReveal)),
&abstractProduceBlockResponseJson{},
).SetArg(
@@ -568,14 +568,14 @@ func TestGetBeaconBlock_FallbackToFullBlock(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
fmt.Sprintf("/eth/v3/validator/blocks/%d?graffiti=%s&randao_reveal=%s", slot, hexutil.Encode(graffiti), hexutil.Encode(randaoReveal)),
&structs.ProduceBlockV3Response{},
).Return(
&httputil.DefaultJsonError{Code: http.StatusNotFound},
).Times(1)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
fmt.Sprintf("/eth/v1/validator/blinded_blocks/%d?graffiti=%s&randao_reveal=%s", slot, hexutil.Encode(graffiti), hexutil.Encode(randaoReveal)),
&abstractProduceBlockResponseJson{},
).Return(

View File

@@ -44,7 +44,7 @@ func TestIndex_Nominal(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Post(
ctx,
gomock.Any(),
"/eth/v1/beacon/states/head/validators",
nil,
reqBuffer,
@@ -94,7 +94,7 @@ func TestIndex_UnexistingValidator(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Post(
ctx,
gomock.Any(),
"/eth/v1/beacon/states/head/validators",
nil,
reqBuffer,
@@ -136,7 +136,7 @@ func TestIndex_BadIndexError(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Post(
ctx,
gomock.Any(),
"/eth/v1/beacon/states/head/validators",
nil,
reqBuffer,
@@ -185,7 +185,7 @@ func TestIndex_JsonResponseError(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Post(
ctx,
gomock.Any(),
"/eth/v1/beacon/states/head/validators",
nil,
reqBuffer,
@@ -208,7 +208,7 @@ func TestIndex_JsonResponseError(t *testing.T) {
}
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
buildURL("/eth/v1/beacon/states/head/validators", queryParams),
&stateValidatorsResponseJson,
).Return(

View File

@@ -48,7 +48,7 @@ func TestPrepareBeaconProposer_Valid(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Post(
ctx,
gomock.Any(),
prepareBeaconProposerTestEndpoint,
nil,
bytes.NewBuffer(marshalledJsonRecipients),
@@ -92,7 +92,7 @@ func TestPrepareBeaconProposer_BadRequest(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Post(
ctx,
gomock.Any(),
prepareBeaconProposerTestEndpoint,
nil,
gomock.Any(),

View File

@@ -126,7 +126,7 @@ func TestProposeAttestation(t *testing.T) {
ctx := context.Background()
jsonRestHandler.EXPECT().Post(
ctx,
gomock.Any(),
"/eth/v1/beacon/pool/attestations",
nil,
bytes.NewBuffer(marshalledAttestations),

View File

@@ -58,7 +58,7 @@ func TestProposeBeaconBlock_Altair(t *testing.T) {
// Make sure that what we send in the POST body is the marshalled version of the protobuf block
headers := map[string]string{"Eth-Consensus-Version": "altair"}
jsonRestHandler.EXPECT().Post(
ctx,
gomock.Any(),
"/eth/v1/beacon/blocks",
headers,
bytes.NewBuffer(marshalledBlock),

View File

@@ -75,7 +75,7 @@ func TestProposeBeaconBlock_Bellatrix(t *testing.T) {
// Make sure that what we send in the POST body is the marshalled version of the protobuf block
headers := map[string]string{"Eth-Consensus-Version": "bellatrix"}
jsonRestHandler.EXPECT().Post(
ctx,
gomock.Any(),
"/eth/v1/beacon/blocks",
headers,
bytes.NewBuffer(marshalledBlock),

View File

@@ -76,7 +76,7 @@ func TestProposeBeaconBlock_BlindedBellatrix(t *testing.T) {
// Make sure that what we send in the POST body is the marshalled version of the protobuf block
headers := map[string]string{"Eth-Consensus-Version": "bellatrix"}
jsonRestHandler.EXPECT().Post(
ctx,
gomock.Any(),
"/eth/v1/beacon/blinded_blocks",
headers,
bytes.NewBuffer(marshalledBlock),

View File

@@ -78,7 +78,7 @@ func TestProposeBeaconBlock_BlindedCapella(t *testing.T) {
// Make sure that what we send in the POST body is the marshalled version of the protobuf block
headers := map[string]string{"Eth-Consensus-Version": "capella"}
jsonRestHandler.EXPECT().Post(
ctx,
gomock.Any(),
"/eth/v1/beacon/blinded_blocks",
headers,
bytes.NewBuffer(marshalledBlock),

View File

@@ -31,7 +31,7 @@ func TestProposeBeaconBlock_BlindedDeneb(t *testing.T) {
// Make sure that what we send in the POST body is the marshalled version of the protobuf block
headers := map[string]string{"Eth-Consensus-Version": "deneb"}
jsonRestHandler.EXPECT().Post(
context.Background(),
gomock.Any(),
"/eth/v1/beacon/blinded_blocks",
headers,
bytes.NewBuffer(denebBytes),

View File

@@ -75,7 +75,7 @@ func TestProposeBeaconBlock_Capella(t *testing.T) {
// Make sure that what we send in the POST body is the marshalled version of the protobuf block
headers := map[string]string{"Eth-Consensus-Version": "capella"}
jsonRestHandler.EXPECT().Post(
context.Background(),
gomock.Any(),
"/eth/v1/beacon/blocks",
headers,
bytes.NewBuffer(marshalledBlock),

View File

@@ -32,7 +32,7 @@ func TestProposeBeaconBlock_Deneb(t *testing.T) {
// Make sure that what we send in the POST body is the marshalled version of the protobuf block
headers := map[string]string{"Eth-Consensus-Version": "deneb"}
jsonRestHandler.EXPECT().Post(
context.Background(),
gomock.Any(),
"/eth/v1/beacon/blocks",
headers,
bytes.NewBuffer(denebBytes),

View File

@@ -54,7 +54,7 @@ func TestProposeBeaconBlock_Phase0(t *testing.T) {
// Make sure that what we send in the POST body is the marshalled version of the protobuf block
headers := map[string]string{"Eth-Consensus-Version": "phase0"}
jsonRestHandler.EXPECT().Post(
ctx,
gomock.Any(),
"/eth/v1/beacon/blocks",
headers,
bytes.NewBuffer(marshalledBlock),

View File

@@ -101,7 +101,7 @@ func TestProposeBeaconBlock_Error(t *testing.T) {
headers := map[string]string{"Eth-Consensus-Version": testCase.consensusVersion}
jsonRestHandler.EXPECT().Post(
ctx,
gomock.Any(),
testCase.endpoint,
headers,
gomock.Any(),

View File

@@ -39,7 +39,7 @@ func TestProposeExit_Valid(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Post(
ctx,
gomock.Any(),
proposeExitTestEndpoint,
nil,
bytes.NewBuffer(marshalledVoluntaryExit),
@@ -88,7 +88,7 @@ func TestProposeExit_BadRequest(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Post(
ctx,
gomock.Any(),
proposeExitTestEndpoint,
nil,
gomock.Any(),

View File

@@ -68,7 +68,7 @@ func TestRegistration_Valid(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Post(
context.Background(),
gomock.Any(),
"/eth/v1/validator/register_validator",
nil,
bytes.NewBuffer(marshalledJsonRegistrations),
@@ -143,7 +143,7 @@ func TestRegistration_BadRequest(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Post(
context.Background(),
gomock.Any(),
"/eth/v1/validator/register_validator",
nil,
gomock.Any(),

View File

@@ -70,7 +70,7 @@ func TestGetStateValidators_Nominal_POST(t *testing.T) {
ctx := context.Background()
jsonRestHandler.EXPECT().Post(
ctx,
gomock.Any(),
"/eth/v1/beacon/states/head/validators",
nil,
bytes.NewBuffer(reqBytes),
@@ -157,7 +157,7 @@ func TestGetStateValidators_Nominal_GET(t *testing.T) {
// First return an error from POST call.
jsonRestHandler.EXPECT().Post(
ctx,
gomock.Any(),
"/eth/v1/beacon/states/head/validators",
nil,
bytes.NewBuffer(reqBytes),
@@ -178,7 +178,7 @@ func TestGetStateValidators_Nominal_GET(t *testing.T) {
query := buildURL("/eth/v1/beacon/states/head/validators", queryParams)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
query,
&stateValidatorsResponseJson,
).Return(
@@ -226,7 +226,7 @@ func TestGetStateValidators_GetRestJsonResponseOnError(t *testing.T) {
// First call POST.
jsonRestHandler.EXPECT().Post(
ctx,
gomock.Any(),
"/eth/v1/beacon/states/head/validators",
nil,
bytes.NewBuffer(reqBytes),
@@ -247,7 +247,7 @@ func TestGetStateValidators_GetRestJsonResponseOnError(t *testing.T) {
query := buildURL("/eth/v1/beacon/states/head/validators", queryParams)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
query,
&stateValidatorsResponseJson,
).Return(
@@ -280,7 +280,7 @@ func TestGetStateValidators_DataIsNil_POST(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Post(
ctx,
gomock.Any(),
"/eth/v1/beacon/states/head/validators",
nil, bytes.NewBuffer(reqBytes),
&stateValidatorsResponseJson,
@@ -320,7 +320,7 @@ func TestGetStateValidators_DataIsNil_GET(t *testing.T) {
// First call POST which will return an error.
jsonRestHandler.EXPECT().Post(
ctx,
gomock.Any(),
"/eth/v1/beacon/states/head/validators",
nil,
bytes.NewBuffer(reqBytes),
@@ -341,7 +341,7 @@ func TestGetStateValidators_DataIsNil_GET(t *testing.T) {
query := buildURL("/eth/v1/beacon/states/head/validators", queryParams)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
query,
&stateValidatorsResponseJson,
).Return(

View File

@@ -32,7 +32,7 @@ func TestValidatorStatus_Nominal(t *testing.T) {
stateValidatorsProvider := mock.NewMockStateValidatorsProvider(ctrl)
stateValidatorsProvider.EXPECT().StateValidators(
ctx,
gomock.Any(),
[]string{stringValidatorPubKey},
nil,
nil,
@@ -65,7 +65,7 @@ func TestValidatorStatus_Nominal(t *testing.T) {
// Expect node version endpoint call.
var nodeVersionResponse structs.GetVersionResponse
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
"/eth/v1/node/version",
&nodeVersionResponse,
).Return(
@@ -97,7 +97,7 @@ func TestValidatorStatus_Error(t *testing.T) {
stateValidatorsProvider := mock.NewMockStateValidatorsProvider(ctrl)
stateValidatorsProvider.EXPECT().StateValidators(
ctx,
gomock.Any(),
gomock.Any(),
nil,
nil,
@@ -139,7 +139,7 @@ func TestMultipleValidatorStatus_Nominal(t *testing.T) {
stateValidatorsProvider := mock.NewMockStateValidatorsProvider(ctrl)
stateValidatorsProvider.EXPECT().StateValidators(
ctx,
gomock.Any(),
stringValidatorsPubKey,
[]primitives.ValidatorIndex{},
nil,
@@ -172,7 +172,7 @@ func TestMultipleValidatorStatus_Nominal(t *testing.T) {
// Expect node version endpoint call.
var nodeVersionResponse structs.GetVersionResponse
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
"/eth/v1/node/version",
&nodeVersionResponse,
).Return(
@@ -224,7 +224,7 @@ func TestMultipleValidatorStatus_Error(t *testing.T) {
stateValidatorsProvider := mock.NewMockStateValidatorsProvider(ctrl)
stateValidatorsProvider.EXPECT().StateValidators(
ctx,
gomock.Any(),
gomock.Any(),
[]primitives.ValidatorIndex{},
nil,
@@ -276,7 +276,7 @@ func TestGetValidatorsStatusResponse_Nominal_SomeActiveValidators(t *testing.T)
stateValidatorsProvider := mock.NewMockStateValidatorsProvider(ctrl)
stateValidatorsProvider.EXPECT().StateValidators(
ctx,
gomock.Any(),
stringValidatorsPubKey,
validatorsIndex,
nil,
@@ -333,7 +333,7 @@ func TestGetValidatorsStatusResponse_Nominal_SomeActiveValidators(t *testing.T)
// Expect node version endpoint call.
var nodeVersionResponse structs.GetVersionResponse
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
"/eth/v1/node/version",
&nodeVersionResponse,
).Return(
@@ -345,7 +345,7 @@ func TestGetValidatorsStatusResponse_Nominal_SomeActiveValidators(t *testing.T)
var validatorCountResponse structs.GetValidatorCountResponse
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
"/eth/v1/beacon/states/head/validator_count?",
&validatorCountResponse,
).Return(
@@ -456,7 +456,7 @@ func TestGetValidatorsStatusResponse_Nominal_NoActiveValidators(t *testing.T) {
stateValidatorsProvider := mock.NewMockStateValidatorsProvider(ctrl)
stateValidatorsProvider.EXPECT().StateValidators(
ctx,
gomock.Any(),
[]string{stringValidatorPubKey},
nil,
nil,
@@ -481,7 +481,7 @@ func TestGetValidatorsStatusResponse_Nominal_NoActiveValidators(t *testing.T) {
// Expect node version endpoint call.
var nodeVersionResponse structs.GetVersionResponse
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
"/eth/v1/node/version",
&nodeVersionResponse,
).Return(
@@ -706,7 +706,7 @@ func TestValidatorStatusResponse_InvalidData(t *testing.T) {
ctx := context.Background()
stateValidatorsProvider := mock.NewMockStateValidatorsProvider(ctrl)
stateValidatorsProvider.EXPECT().StateValidators(
ctx,
gomock.Any(),
testCase.inputGetStateValidatorsInterface.inputStringPubKeys,
testCase.inputGetStateValidatorsInterface.inputIndexes,
testCase.inputGetStateValidatorsInterface.inputStatuses,
@@ -720,7 +720,7 @@ func TestValidatorStatusResponse_InvalidData(t *testing.T) {
// Expect node version endpoint call.
var nodeVersionResponse structs.GetVersionResponse
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
"/eth/v1/node/version",
&nodeVersionResponse,
).Return(

View File

@@ -27,7 +27,7 @@ func TestStreamBlocks_UnsupportedConsensusVersion(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
gomock.Any(),
&abstractSignedBlockResponseJson{},
).SetArg(
@@ -149,7 +149,7 @@ func TestStreamBlocks_Error(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
gomock.Any(),
&abstractSignedBlockResponseJson{},
).SetArg(
@@ -214,7 +214,7 @@ func TestStreamBlocks_Phase0Valid(t *testing.T) {
require.NoError(t, err)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
"/eth/v2/beacon/blocks/head",
&signedBlockResponseJson,
).Return(
@@ -251,7 +251,7 @@ func TestStreamBlocks_Phase0Valid(t *testing.T) {
require.NoError(t, err)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
"/eth/v2/beacon/blocks/head",
&signedBlockResponseJson,
).Return(
@@ -278,7 +278,7 @@ func TestStreamBlocks_Phase0Valid(t *testing.T) {
// The fourth call is only necessary when verifiedOnly == true since the previous block was optimistic
if testCase.verifiedOnly {
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
"/eth/v2/beacon/blocks/head",
&signedBlockResponseJson,
).Return(
@@ -375,7 +375,7 @@ func TestStreamBlocks_AltairValid(t *testing.T) {
require.NoError(t, err)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
"/eth/v2/beacon/blocks/head",
&signedBlockResponseJson,
).Return(
@@ -412,7 +412,7 @@ func TestStreamBlocks_AltairValid(t *testing.T) {
require.NoError(t, err)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
"/eth/v2/beacon/blocks/head",
&signedBlockResponseJson,
).Return(
@@ -439,7 +439,7 @@ func TestStreamBlocks_AltairValid(t *testing.T) {
// The fourth call is only necessary when verifiedOnly == true since the previous block was optimistic
if testCase.verifiedOnly {
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
"/eth/v2/beacon/blocks/head",
&signedBlockResponseJson,
).Return(
@@ -536,7 +536,7 @@ func TestStreamBlocks_BellatrixValid(t *testing.T) {
require.NoError(t, err)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
"/eth/v2/beacon/blocks/head",
&signedBlockResponseJson,
).Return(
@@ -573,7 +573,7 @@ func TestStreamBlocks_BellatrixValid(t *testing.T) {
require.NoError(t, err)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
"/eth/v2/beacon/blocks/head",
&signedBlockResponseJson,
).Return(
@@ -600,7 +600,7 @@ func TestStreamBlocks_BellatrixValid(t *testing.T) {
// The fourth call is only necessary when verifiedOnly == true since the previous block was optimistic
if testCase.verifiedOnly {
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
"/eth/v2/beacon/blocks/head",
&signedBlockResponseJson,
).Return(
@@ -697,7 +697,7 @@ func TestStreamBlocks_CapellaValid(t *testing.T) {
require.NoError(t, err)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
"/eth/v2/beacon/blocks/head",
&signedBlockResponseJson,
).Return(
@@ -734,7 +734,7 @@ func TestStreamBlocks_CapellaValid(t *testing.T) {
require.NoError(t, err)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
"/eth/v2/beacon/blocks/head",
&signedBlockResponseJson,
).Return(
@@ -761,7 +761,7 @@ func TestStreamBlocks_CapellaValid(t *testing.T) {
// The fourth call is only necessary when verifiedOnly == true since the previous block was optimistic
if testCase.verifiedOnly {
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
"/eth/v2/beacon/blocks/head",
&signedBlockResponseJson,
).Return(
@@ -858,7 +858,7 @@ func TestStreamBlocks_DenebValid(t *testing.T) {
marshalledSignedBeaconBlockContainer1, err := json.Marshal(denebBlock)
require.NoError(t, err)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
"/eth/v2/beacon/blocks/head",
&signedBlockResponseJson,
).Return(
@@ -887,7 +887,7 @@ func TestStreamBlocks_DenebValid(t *testing.T) {
require.NoError(t, err)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
"/eth/v2/beacon/blocks/head",
&signedBlockResponseJson,
).Return(
@@ -904,7 +904,7 @@ func TestStreamBlocks_DenebValid(t *testing.T) {
// The fourth call is only necessary when verifiedOnly == true since the previous block was optimistic
if testCase.verifiedOnly {
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
"/eth/v2/beacon/blocks/head",
&signedBlockResponseJson,
).Return(

View File

@@ -98,7 +98,7 @@ func TestSubmitAggregateSelectionProof(t *testing.T) {
// Call node syncing endpoint to check if head is optimistic.
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
syncingEndpoint,
&structs.SyncStatusResponse{},
).SetArg(
@@ -114,7 +114,7 @@ func TestSubmitAggregateSelectionProof(t *testing.T) {
// Call attestation data to get attestation data root to query aggregate attestation.
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
fmt.Sprintf("%s?committee_index=%d&slot=%d", attestationDataEndpoint, committeeIndex, slot),
&structs.GetAttestationDataResponse{},
).SetArg(
@@ -126,7 +126,7 @@ func TestSubmitAggregateSelectionProof(t *testing.T) {
// Call attestation data to get attestation data root to query aggregate attestation.
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
fmt.Sprintf("%s?attestation_data_root=%s&slot=%d", aggregateAttestationEndpoint, hexutil.Encode(attestationDataRootBytes[:]), slot),
&structs.AggregateAttestationResponse{},
).SetArg(

View File

@@ -28,7 +28,7 @@ func TestSubmitSignedAggregateSelectionProof_Valid(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Post(
ctx,
gomock.Any(),
"/eth/v1/validator/aggregate_and_proofs",
nil,
bytes.NewBuffer(marshalledSignedAggregateSignedAndProof),
@@ -59,7 +59,7 @@ func TestSubmitSignedAggregateSelectionProof_BadRequest(t *testing.T) {
ctx := context.Background()
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Post(
ctx,
gomock.Any(),
"/eth/v1/validator/aggregate_and_proofs",
nil,
bytes.NewBuffer(marshalledSignedAggregateSignedAndProof),

View File

@@ -46,7 +46,7 @@ func TestSubmitSignedContributionAndProof_Valid(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Post(
ctx,
gomock.Any(),
submitSignedContributionAndProofTestEndpoint,
nil,
bytes.NewBuffer(marshalledContributionAndProofs),
@@ -121,7 +121,7 @@ func TestSubmitSignedContributionAndProof_Error(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
if testCase.httpRequestExpected {
jsonRestHandler.EXPECT().Post(
ctx,
gomock.Any(),
submitSignedContributionAndProofTestEndpoint,
gomock.Any(),
gomock.Any(),

View File

@@ -47,7 +47,7 @@ func TestSubscribeCommitteeSubnets_Valid(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Post(
ctx,
gomock.Any(),
subscribeCommitteeSubnetsTestEndpoint,
nil,
bytes.NewBuffer(committeeSubscriptionsBytes),
@@ -209,7 +209,7 @@ func TestSubscribeCommitteeSubnets_Error(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
if testCase.expectSubscribeRestCall {
jsonRestHandler.EXPECT().Post(
ctx,
gomock.Any(),
subscribeCommitteeSubnetsTestEndpoint,
gomock.Any(),
gomock.Any(),

View File

@@ -104,7 +104,7 @@ func TestGetAggregatedSyncSelections(t *testing.T) {
ctx := context.Background()
jsonRestHandler.EXPECT().Post(
ctx,
gomock.Any(),
"/eth/v1/validator/sync_committee_selections",
nil,
bytes.NewBuffer(reqBody),

View File

@@ -46,7 +46,7 @@ func TestSubmitSyncMessage_Valid(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Post(
context.Background(),
gomock.Any(),
"/eth/v1/beacon/pool/sync_committees",
nil,
bytes.NewBuffer(marshalledJsonRegistrations),
@@ -75,7 +75,7 @@ func TestSubmitSyncMessage_BadRequest(t *testing.T) {
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Post(
context.Background(),
gomock.Any(),
"/eth/v1/beacon/pool/sync_committees",
nil,
gomock.Any(),
@@ -139,7 +139,7 @@ func TestGetSyncMessageBlockRoot(t *testing.T) {
ctx := context.Background()
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
"/eth/v1/beacon/blocks/head/root",
&structs.BlockRootResponse{},
).SetArg(
@@ -209,7 +209,7 @@ func TestGetSyncCommitteeContribution(t *testing.T) {
ctx := context.Background()
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
"/eth/v1/beacon/blocks/head/root",
&structs.BlockRootResponse{},
).SetArg(
@@ -224,7 +224,7 @@ func TestGetSyncCommitteeContribution(t *testing.T) {
).Times(1)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
fmt.Sprintf("/eth/v1/validator/sync_committee_contribution?beacon_block_root=%s&slot=%d&subcommittee_index=%d",
blockRoot, uint64(request.Slot), request.SubnetId),
&structs.ProduceSyncCommitteeContributionResponse{},
@@ -316,7 +316,7 @@ func TestGetSyncSubCommitteeIndex(t *testing.T) {
require.NoError(t, err)
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Post(
ctx,
gomock.Any(),
validatorsEndpoint,
nil,
bytes.NewBuffer(valsReqBytes),
@@ -351,7 +351,7 @@ func TestGetSyncSubCommitteeIndex(t *testing.T) {
query := buildURL("/eth/v1/beacon/states/head/validators", queryParams)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
query,
&structs.GetValidatorsResponse{},
).Return(
@@ -368,7 +368,7 @@ func TestGetSyncSubCommitteeIndex(t *testing.T) {
}
jsonRestHandler.EXPECT().Post(
ctx,
gomock.Any(),
fmt.Sprintf("%s/%d", syncDutiesEndpoint, slots.ToEpoch(slot)),
nil,
bytes.NewBuffer(validatorIndicesBytes),

View File

@@ -118,7 +118,7 @@ func TestGetValidatorCount(t *testing.T) {
// Expect node version endpoint call.
var nodeVersionResponse structs.GetVersionResponse
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
"/eth/v1/node/version",
&nodeVersionResponse,
).Return(
@@ -130,7 +130,7 @@ func TestGetValidatorCount(t *testing.T) {
var validatorCountResponse structs.GetValidatorCountResponse
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
"/eth/v1/beacon/states/head/validator_count?status=active",
&validatorCountResponse,
).Return(

View File

@@ -24,7 +24,7 @@ func TestWaitForChainStart_ValidGenesis(t *testing.T) {
genesisResponseJson := structs.GetGenesisResponse{}
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
"/eth/v1/beacon/genesis",
&genesisResponseJson,
).Return(
@@ -91,7 +91,7 @@ func TestWaitForChainStart_BadGenesis(t *testing.T) {
genesisResponseJson := structs.GetGenesisResponse{}
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
"/eth/v1/beacon/genesis",
&genesisResponseJson,
).Return(
@@ -119,7 +119,7 @@ func TestWaitForChainStart_JsonResponseError(t *testing.T) {
genesisResponseJson := structs.GetGenesisResponse{}
jsonRestHandler := mock.NewMockJsonRestHandler(ctrl)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
"/eth/v1/beacon/genesis",
&genesisResponseJson,
).Return(
@@ -144,7 +144,7 @@ func TestWaitForChainStart_JsonResponseError404(t *testing.T) {
// First, mock a request that receives a 404 error (which means that the genesis data is not available yet)
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
"/eth/v1/beacon/genesis",
&genesisResponseJson,
).Return(
@@ -156,7 +156,7 @@ func TestWaitForChainStart_JsonResponseError404(t *testing.T) {
// After receiving a 404 error, mock a request that actually has genesis data available
jsonRestHandler.EXPECT().Get(
ctx,
gomock.Any(),
"/eth/v1/beacon/genesis",
&genesisResponseJson,
).Return(

View File

@@ -329,6 +329,9 @@ func CreateSignedVoluntaryExit(
// Sign randao reveal with randao domain and private key.
func (v *validator) signRandaoReveal(ctx context.Context, pubKey [fieldparams.BLSPubkeyLength]byte, epoch primitives.Epoch, slot primitives.Slot) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "validator.signRandaoReveal")
defer span.End()
domain, err := v.domainData(ctx, epoch, params.BeaconConfig().DomainRandao[:])
if err != nil {
return nil, errors.Wrap(err, domainDataErr)
@@ -359,6 +362,9 @@ func (v *validator) signRandaoReveal(ctx context.Context, pubKey [fieldparams.BL
// Sign block with proposer domain and private key.
// Returns the signature, block signing root, and any error.
func (v *validator) signBlock(ctx context.Context, pubKey [fieldparams.BLSPubkeyLength]byte, epoch primitives.Epoch, slot primitives.Slot, b interfaces.ReadOnlyBeaconBlock) ([]byte, [32]byte, error) {
ctx, span := trace.StartSpan(ctx, "validator.signBlock")
defer span.End()
domain, err := v.domainData(ctx, epoch, params.BeaconConfig().DomainBeaconProposer[:])
if err != nil {
return nil, [32]byte{}, errors.Wrap(err, domainDataErr)
@@ -397,6 +403,9 @@ func signVoluntaryExit(
exit *ethpb.VoluntaryExit,
slot primitives.Slot,
) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "validator.signVoluntaryExit")
defer span.End()
req := &ethpb.DomainRequest{
Epoch: exit.Epoch,
Domain: params.BeaconConfig().DomainVoluntaryExit[:],
@@ -430,6 +439,9 @@ func signVoluntaryExit(
// Graffiti gets the graffiti from cli or file for the validator public key.
func (v *validator) Graffiti(ctx context.Context, pubKey [fieldparams.BLSPubkeyLength]byte) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "validator.Graffiti")
defer span.End()
if v.proposerSettings != nil {
// Check proposer settings for specific key first
if v.proposerSettings.ProposeConfig != nil {
@@ -493,6 +505,9 @@ func (v *validator) Graffiti(ctx context.Context, pubKey [fieldparams.BLSPubkeyL
}
func (v *validator) SetGraffiti(ctx context.Context, pubkey [fieldparams.BLSPubkeyLength]byte, graffiti []byte) error {
ctx, span := trace.StartSpan(ctx, "validator.SetGraffiti")
defer span.End()
if graffiti == nil {
return nil
}
@@ -518,6 +533,9 @@ func (v *validator) SetGraffiti(ctx context.Context, pubkey [fieldparams.BLSPubk
}
func (v *validator) DeleteGraffiti(ctx context.Context, pubKey [fieldparams.BLSPubkeyLength]byte) error {
ctx, span := trace.StartSpan(ctx, "validator.DeleteGraffiti")
defer span.End()
if v.proposerSettings == nil || v.proposerSettings.ProposeConfig == nil {
return errors.New("attempted to delete graffiti without proposer settings, graffiti will default to flag options")
}

View File

@@ -62,6 +62,9 @@ func SubmitValidatorRegistrations(
// Signs the validator registration object with the proposer domain and private key.
func signValidatorRegistration(ctx context.Context, signer iface.SigningFunc, reg *ethpb.ValidatorRegistrationV1) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "validator.signValidatorRegistration")
defer span.End()
// Per spec, we want the fork version and genesis validators root to be nil,
// which are the genesis value and zero by default.
d, err := signing.ComputeDomain(
@@ -91,6 +94,9 @@ func signValidatorRegistration(ctx context.Context, signer iface.SigningFunc, re
// SignValidatorRegistrationRequest compares and returns either the cached validator registration request or signs a new one.
func (v *validator) SignValidatorRegistrationRequest(ctx context.Context, signer iface.SigningFunc, newValidatorRegistration *ethpb.ValidatorRegistrationV1) (*ethpb.SignedValidatorRegistrationV1, error) {
ctx, span := trace.StartSpan(ctx, "validator.SignValidatorRegistrationRequest")
defer span.End()
signedReg, ok := v.signedValidatorRegistrations[bytesutil.ToBytes48(newValidatorRegistration.Pubkey)]
if ok && isValidatorRegistrationSame(signedReg.Message, newValidatorRegistration) {
return signedReg, nil

View File

@@ -160,6 +160,9 @@ func onAccountsChanged(ctx context.Context, v iface.Validator, current [][48]byt
}
func initializeValidatorAndGetHeadSlot(ctx context.Context, v iface.Validator) (primitives.Slot, error) {
ctx, span := trace.StartSpan(ctx, "validator.initializeValidatorAndGetHeadSlot")
defer span.End()
ticker := time.NewTicker(backOffPeriod)
defer ticker.Stop()
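The context lines in this hunk show that initializeValidatorAndGetHeadSlot is built around a ticker-driven retry loop. A rough, generic sketch of that pattern (a hypothetical `waitFor` helper, not Prysm's actual implementation):

```go
package example

import (
	"context"
	"time"
)

// waitFor polls ready() until it reports success, retrying every backOff
// period and giving up when the context is cancelled. It mirrors the
// NewTicker/Stop shape visible in the hunk above; the helper itself is
// illustrative only.
func waitFor(ctx context.Context, backOff time.Duration, ready func(context.Context) (bool, error)) error {
	ticker := time.NewTicker(backOff)
	defer ticker.Stop()
	for {
		ok, err := ready(ctx)
		if err == nil && ok {
			return nil
		}
		select {
		case <-ticker.C: // wait out the backoff, then try again
		case <-ctx.Done():
			return ctx.Err()
		}
	}
}
```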

View File

@@ -192,6 +192,9 @@ func (v *validator) SubmitSignedContributionAndProof(ctx context.Context, slot p
// Signs and returns selection proofs per validator for slot and pub key.
func (v *validator) selectionProofs(ctx context.Context, slot primitives.Slot, pubKey [fieldparams.BLSPubkeyLength]byte, indexRes *ethpb.SyncSubcommitteeIndexResponse, validatorIndex primitives.ValidatorIndex) ([][]byte, error) {
ctx, span := trace.StartSpan(ctx, "validator.selectionProofs")
defer span.End()
selectionProofs := make([][]byte, len(indexRes.Indices))
cfg := params.BeaconConfig()
size := cfg.SyncCommitteeSize
@@ -231,6 +234,9 @@ func (v *validator) selectionProofs(ctx context.Context, slot primitives.Slot, p
// Signs input slot with domain sync committee selection proof. This is used to create the signature for sync committee selection.
func (v *validator) signSyncSelectionData(ctx context.Context, pubKey [fieldparams.BLSPubkeyLength]byte, index uint64, slot primitives.Slot) (signature []byte, err error) {
ctx, span := trace.StartSpan(ctx, "validator.signSyncSelectionData")
defer span.End()
domain, err := v.domainData(ctx, slots.ToEpoch(slot), params.BeaconConfig().DomainSyncCommitteeSelectionProof[:])
if err != nil {
return nil, err
@@ -258,6 +264,9 @@ func (v *validator) signSyncSelectionData(ctx context.Context, pubKey [fieldpara
// This returns the signature of validator signing over sync committee contribution and proof object.
func (v *validator) signContributionAndProof(ctx context.Context, pubKey [fieldparams.BLSPubkeyLength]byte, c *ethpb.ContributionAndProof, slot primitives.Slot) ([]byte, error) {
ctx, span := trace.StartSpan(ctx, "validator.signContributionAndProof")
defer span.End()
d, err := v.domainData(ctx, slots.ToEpoch(c.Contribution.Slot), params.BeaconConfig().DomainContributionAndProof[:])
if err != nil {
return nil, err

View File

@@ -136,6 +136,9 @@ func (v *validator) Done() {
// WaitForKeymanagerInitialization checks if the validator needs to wait for keymanager initialization.
func (v *validator) WaitForKeymanagerInitialization(ctx context.Context) error {
ctx, span := trace.StartSpan(ctx, "validator.WaitForKeymanagerInitialization")
defer span.End()
genesisRoot, err := v.db.GenesisValidatorsRoot(ctx)
if err != nil {
return errors.Wrap(err, "unable to retrieve valid genesis validators root while initializing key manager")
@@ -179,6 +182,9 @@ func waitForWebWalletInitialization(
walletInitializedEvent *event.Feed,
walletChan chan *wallet.Wallet,
) (keymanager.IKeymanager, error) {
ctx, span := trace.StartSpan(ctx, "validator.waitForWebWalletInitialization")
defer span.End()
sub := walletInitializedEvent.Subscribe(walletChan)
defer sub.Unsubscribe()
for {
@@ -201,6 +207,9 @@ func waitForWebWalletInitialization(
// recheckKeys checks if the validator has any keys that need to be rechecked.
// The keymanager implements a subscription to push these updates to the validator.
func recheckKeys(ctx context.Context, valDB db.Database, km keymanager.IKeymanager) {
ctx, span := trace.StartSpan(ctx, "validator.recheckKeys")
defer span.End()
var validatingKeys [][fieldparams.BLSPubkeyLength]byte
var err error
validatingKeys, err = km.FetchValidatingPublicKeys(ctx)
@@ -215,6 +224,9 @@ func recheckKeys(ctx context.Context, valDB db.Database, km keymanager.IKeymanag
// to accounts changes in the keymanager, then updates those keys'
// buckets in bolt DB if a bucket for a key does not exist.
func recheckValidatingKeysBucket(ctx context.Context, valDB db.Database, km keymanager.IKeymanager) {
ctx, span := trace.StartSpan(ctx, "validator.recheckValidatingKeysBucket")
defer span.End()
importedKeymanager, ok := km.(*local.Keymanager)
if !ok {
return
@@ -425,6 +437,9 @@ func (v *validator) SlotDeadline(slot primitives.Slot) time.Time {
// CheckDoppelGanger checks if the current actively provided keys have
// any duplicates active in the network.
func (v *validator) CheckDoppelGanger(ctx context.Context) error {
ctx, span := trace.StartSpan(ctx, "validator.CheckDoppelganger")
defer span.End()
if !features.Get().EnableDoppelGanger {
return nil
}
@@ -531,7 +546,7 @@ func (v *validator) UpdateDuties(ctx context.Context, slot primitives.Slot) erro
}
ctx, cancel := context.WithDeadline(ctx, v.SlotDeadline(ss))
defer cancel()
ctx, span := trace.StartSpan(ctx, "validator.UpdateAssignments")
ctx, span := trace.StartSpan(ctx, "validator.UpdateDuties")
defer span.End()
validatingKeys, err := v.km.FetchValidatingPublicKeys(ctx)
@@ -603,6 +618,9 @@ func (v *validator) UpdateDuties(ctx context.Context, slot primitives.Slot) erro
// subscribeToSubnets iterates through each validator duty, signs each slot, and asks the beacon node
// to eagerly subscribe to subnets so that the aggregator has attestations to aggregate.
func (v *validator) subscribeToSubnets(ctx context.Context, duties *ethpb.DutiesResponse) error {
ctx, span := trace.StartSpan(ctx, "validator.subscribeToSubnets")
defer span.End()
subscribeSlots := make([]primitives.Slot, 0, len(duties.CurrentEpochDuties)+len(duties.NextEpochDuties))
subscribeCommitteeIndices := make([]primitives.CommitteeIndex, 0, len(duties.CurrentEpochDuties)+len(duties.NextEpochDuties))
subscribeIsAggregator := make([]bool, 0, len(duties.CurrentEpochDuties)+len(duties.NextEpochDuties))
@@ -685,6 +703,9 @@ func (v *validator) subscribeToSubnets(ctx context.Context, duties *ethpb.Duties
// validator is known to not have any roles at the slot. Returns UNKNOWN if the
// validator assignments are unknown. Otherwise returns a valid ValidatorRole map.
func (v *validator) RolesAt(ctx context.Context, slot primitives.Slot) (map[[fieldparams.BLSPubkeyLength]byte][]iface.ValidatorRole, error) {
ctx, span := trace.StartSpan(ctx, "validator.RolesAt")
defer span.End()
v.dutiesLock.RLock()
defer v.dutiesLock.RUnlock()
rolesAt := make(map[[fieldparams.BLSPubkeyLength]byte][]iface.ValidatorRole)
@@ -769,6 +790,9 @@ func (v *validator) isAggregator(
pubKey [fieldparams.BLSPubkeyLength]byte,
validatorIndex primitives.ValidatorIndex,
) (bool, error) {
ctx, span := trace.StartSpan(ctx, "validator.isAggregator")
defer span.End()
modulo := uint64(1)
if len(committeeIndex)/int(params.BeaconConfig().TargetAggregatorsPerCommittee) > 1 {
modulo = uint64(len(committeeIndex)) / params.BeaconConfig().TargetAggregatorsPerCommittee
@@ -804,6 +828,9 @@ func (v *validator) isAggregator(
// modulo = max(1, SYNC_COMMITTEE_SIZE // SYNC_COMMITTEE_SUBNET_COUNT // TARGET_AGGREGATORS_PER_SYNC_SUBCOMMITTEE)
// return bytes_to_uint64(hash(signature)[0:8]) % modulo == 0
func (v *validator) isSyncCommitteeAggregator(ctx context.Context, slot primitives.Slot, pubKey [fieldparams.BLSPubkeyLength]byte, validatorIndex primitives.ValidatorIndex) (bool, error) {
ctx, span := trace.StartSpan(ctx, "validator.isSyncCommitteeAggregator")
defer span.End()
res, err := v.validatorClient.SyncSubcommitteeIndex(ctx, &ethpb.SyncSubcommitteeIndexRequest{
PublicKey: pubKey[:],
Slot: slot,
@@ -855,6 +882,9 @@ func (v *validator) isSyncCommitteeAggregator(ctx context.Context, slot primitiv
// is very rare, a validator should check these data every epoch to be sure the validator is
// participating on the correct fork version.
func (v *validator) UpdateDomainDataCaches(ctx context.Context, slot primitives.Slot) {
ctx, span := trace.StartSpan(ctx, "validator.UpdateDomainDataCaches")
defer span.End()
for _, d := range [][]byte{
params.BeaconConfig().DomainRandao[:],
params.BeaconConfig().DomainBeaconAttester[:],
@@ -873,6 +903,9 @@ func (v *validator) UpdateDomainDataCaches(ctx context.Context, slot primitives.
}
func (v *validator) domainData(ctx context.Context, epoch primitives.Epoch, domain []byte) (*ethpb.DomainResponse, error) {
ctx, span := trace.StartSpan(ctx, "validator.domainData")
defer span.End()
v.domainDataLock.RLock()
req := &ethpb.DomainRequest{
@@ -1019,6 +1052,9 @@ func (v *validator) ProposerSettings() *proposer.Settings {
// SetProposerSettings sets and saves the passed in proposer settings overriding the in memory one
func (v *validator) SetProposerSettings(ctx context.Context, settings *proposer.Settings) error {
ctx, span := trace.StartSpan(ctx, "validator.SetProposerSettings")
defer span.End()
if v.db == nil {
return errors.New("db is not set")
}
@@ -1031,6 +1067,9 @@ func (v *validator) SetProposerSettings(ctx context.Context, settings *proposer.
// PushProposerSettings calls the prepareBeaconProposer RPC to set the fee recipient and also the register validator API if using a custom builder.
func (v *validator) PushProposerSettings(ctx context.Context, km keymanager.IKeymanager, slot primitives.Slot, deadline time.Time) error {
ctx, span := trace.StartSpan(ctx, "validator.PushProposerSettings")
defer span.End()
if km == nil {
return errors.New("keymanager is nil when calling PrepareBeaconProposer")
}
@@ -1132,6 +1171,9 @@ func (v *validator) ChangeHost() {
}
func (v *validator) filterAndCacheActiveKeys(ctx context.Context, pubkeys [][fieldparams.BLSPubkeyLength]byte, slot primitives.Slot) ([][fieldparams.BLSPubkeyLength]byte, error) {
ctx, span := trace.StartSpan(ctx, "validator.filterAndCacheActiveKeys")
defer span.End()
filteredKeys := make([][fieldparams.BLSPubkeyLength]byte, 0)
statusRequestKeys := make([][]byte, 0)
for _, k := range pubkeys {
@@ -1214,6 +1256,9 @@ func (v *validator) buildSignedRegReqs(
activePubkeys [][fieldparams.BLSPubkeyLength]byte,
signer iface.SigningFunc,
) []*ethpb.SignedValidatorRegistrationV1 {
ctx, span := trace.StartSpan(ctx, "validator.buildSignedRegReqs")
defer span.End()
var signedValRegRegs []*ethpb.SignedValidatorRegistrationV1
if v.ProposerSettings() == nil {
return signedValRegRegs
@@ -1297,6 +1342,9 @@ func (v *validator) buildSignedRegReqs(
}
func (v *validator) validatorIndex(ctx context.Context, pubkey [fieldparams.BLSPubkeyLength]byte) (primitives.ValidatorIndex, bool, error) {
ctx, span := trace.StartSpan(ctx, "validator.validatorIndex")
defer span.End()
resp, err := v.validatorClient.ValidatorIndex(ctx, &ethpb.ValidatorIndexRequest{PublicKey: pubkey[:]})
switch {
case status.Code(err) == codes.NotFound:
@@ -1316,6 +1364,9 @@ func (v *validator) validatorIndex(ctx context.Context, pubkey [fieldparams.BLSP
}
func (v *validator) aggregatedSelectionProofs(ctx context.Context, duties *ethpb.DutiesResponse) error {
ctx, span := trace.StartSpan(ctx, "validator.aggregatedSelectionProofs")
defer span.End()
// Create new instance of attestation selections map.
v.newAttSelections()
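The hunks above add the same two-line preamble to most validator methods: start a named span and end it when the method returns. A minimal sketch of the pattern, assuming the OpenCensus `go.opencensus.io/trace` API that Prysm's tracing helper wraps (the exact import path is not shown in this diff):

```go
package example

import (
	"context"

	"go.opencensus.io/trace"
)

// signSomething illustrates the instrumentation pattern added in this diff:
// the incoming context is replaced by a span-carrying context, and the span
// is closed when the method returns. Because ctx is re-derived here, test
// doubles can no longer match on the original context value, which is likely
// what motivated the gomock.Any() changes earlier in this compare view.
func signSomething(ctx context.Context) error {
	ctx, span := trace.StartSpan(ctx, "validator.signSomething")
	defer span.End()

	// ... the real method body would use the derived ctx from here on ...
	_ = ctx
	return nil
}
```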

View File

@@ -2075,7 +2075,7 @@ func TestValidator_buildPrepProposerReqs_WithoutDefaultConfig(t *testing.T) {
ctx := context.Background()
client := validatormock.NewMockValidatorClient(ctrl)
client.EXPECT().ValidatorIndex(
ctx,
gomock.Any(),
&ethpb.ValidatorIndexRequest{
PublicKey: pubkey2[:],
},
@@ -2084,7 +2084,7 @@ func TestValidator_buildPrepProposerReqs_WithoutDefaultConfig(t *testing.T) {
}, nil)
client.EXPECT().ValidatorIndex(
ctx,
gomock.Any(),
&ethpb.ValidatorIndexRequest{
PublicKey: pubkey3[:],
},
@@ -2210,7 +2210,7 @@ func TestValidator_buildPrepProposerReqs_WithDefaultConfig(t *testing.T) {
client := validatormock.NewMockValidatorClient(ctrl)
client.EXPECT().ValidatorIndex(
ctx,
gomock.Any(),
&ethpb.ValidatorIndexRequest{
PublicKey: pubkey2[:],
},
@@ -2219,7 +2219,7 @@ func TestValidator_buildPrepProposerReqs_WithDefaultConfig(t *testing.T) {
}, nil)
client.EXPECT().ValidatorIndex(
ctx,
gomock.Any(),
&ethpb.ValidatorIndexRequest{
PublicKey: pubkey3[:],
},

View File

@@ -71,7 +71,6 @@ go_library(
"@com_github_gorilla_mux//:go_default_library",
"@com_github_grpc_ecosystem_grpc_gateway_v2//runtime:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_fastssz//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_urfave_cli_v2//:go_default_library",
"@org_golang_google_protobuf//encoding/protojson:go_default_library",

View File

@@ -22,7 +22,6 @@ import (
"github.com/gorilla/mux"
gwruntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
"github.com/pkg/errors"
fastssz "github.com/prysmaticlabs/fastssz"
"github.com/prysmaticlabs/prysm/v5/api"
"github.com/prysmaticlabs/prysm/v5/api/gateway"
"github.com/prysmaticlabs/prysm/v5/api/server/middleware"
@@ -122,8 +121,6 @@ func NewValidatorClient(cliCtx *cli.Context) (*ValidatorClient, error) {
}
}
configureFastSSZHashingAlgorithm()
// initialize router used for endpoints
router := newRouter(cliCtx)
// If the --web flag is enabled to administer the validator
@@ -794,7 +791,3 @@ func clearDB(ctx context.Context, dataDir string, force bool, isDatabaseMinimal
return nil
}
func configureFastSSZHashingAlgorithm() {
fastssz.EnableVectorizedHTR = true
}