Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 13:58:09 -05:00)

Compare commits: fixSkipSlo...test_appro (9 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 80ded80b4d | |
| | 3a7117dcbf | |
| | 8888fa4bb3 | |
| | f065209a3e | |
| | e439f4aff6 | |
| | 88f8dbecc8 | |
| | 2dfe291cf9 | |
| | a80c15f3a9 | |
| | f98f93e6e0 | |
.github/workflows/approval_check.yaml — new file (vendored), 16 lines
@@ -0,0 +1,16 @@
+name: PR approved and labeled
+
+on:
+  pull_request:
+    types: [ labeled ]
+
+jobs:
+  build:
+    if: ${{ github.event.label.name == 'CI' }}
+    runs-on: ubuntu-latest
+    steps:
+      - uses: trstringer/manual-approval@v1
+        with:
+          secret: ${{ github.TOKEN }}
+          approvers: potuz
+          minimum-approvals: 1
@@ -5,6 +5,7 @@ go_library(
    srcs = [
        "gateway.go",
        "log.go",
+       "modifiers.go",
        "options.go",
    ],
    importpath = "github.com/prysmaticlabs/prysm/api/gateway",
@@ -23,6 +24,7 @@ go_library(
        "@org_golang_google_grpc//:go_default_library",
        "@org_golang_google_grpc//connectivity:go_default_library",
        "@org_golang_google_grpc//credentials:go_default_library",
+       "@org_golang_google_protobuf//proto:go_default_library",
    ],
)

@@ -15,6 +15,7 @@ go_library(
    deps = [
        "//api/grpc:go_default_library",
        "//encoding/bytesutil:go_default_library",
+       "@com_github_ethereum_go_ethereum//common:go_default_library",
        "@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
        "@com_github_gorilla_mux//:go_default_library",
        "@com_github_pkg_errors//:go_default_library",
@@ -111,7 +111,7 @@ func (m *ApiProxyMiddleware) WithMiddleware(path string) http.HandlerFunc {
            }
        }

-       if req.Method == "DELETE" {
+       if req.Method == "DELETE" && req.Body != http.NoBody {
            if errJson := handleDeleteRequestForEndpoint(endpoint, req); errJson != nil {
                WriteError(w, errJson, nil)
                return
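The tightened guard only parses a DELETE body when one is actually present. A minimal, self-contained sketch (not part of this diff) of the distinction the middleware now relies on — the net/http server represents an absent request body as http.NoBody:

```go
package main

import (
    "fmt"
    "net/http"
    "net/http/httptest"
    "strings"
)

func main() {
    srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        // For server-side requests with no payload, net/http sets r.Body to http.NoBody,
        // which is exactly what the updated middleware checks before deserializing.
        fmt.Printf("%s hasBody=%v\n", r.Method, r.Body != http.NoBody)
    }))
    defer srv.Close()

    noBody, _ := http.NewRequest(http.MethodDelete, srv.URL, nil)
    _, _ = http.DefaultClient.Do(noBody) // prints: DELETE hasBody=false

    withBody, _ := http.NewRequest(http.MethodDelete, srv.URL, strings.NewReader(`{"example":true}`))
    _, _ = http.DefaultClient.Do(withBody) // prints: DELETE hasBody=true
}
```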
@@ -9,6 +9,7 @@ import (
    "strings"
    "time"

+   "github.com/ethereum/go-ethereum/common"
    "github.com/ethereum/go-ethereum/common/hexutil"
    "github.com/pkg/errors"
    "github.com/wealdtech/go-bytesutil"
@@ -100,6 +101,20 @@ func base64ToHexProcessor(v reflect.Value) error {
    return nil
}

+func base64ToChecksumAddressProcessor(v reflect.Value) error {
+   if v.String() == "" {
+       // Empty hex values are represented as "0x".
+       v.SetString("0x")
+       return nil
+   }
+   b, err := base64.StdEncoding.DecodeString(v.String())
+   if err != nil {
+       return err
+   }
+   v.SetString(common.BytesToAddress(b).Hex())
+   return nil
+}
+
func base64ToUint256Processor(v reflect.Value) error {
    if v.String() == "" {
        return nil
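As a reference, here is a small runnable sketch (not part of the diff) of what base64ToChecksumAddressProcessor does to a base64-encoded bytes field. The input "Zm9v" and the checksummed output match the test vector added further down in this compare:

```go
package main

import (
    "encoding/base64"
    "fmt"

    "github.com/ethereum/go-ethereum/common"
)

func main() {
    // grpc-gateway serializes protobuf bytes fields as base64; "Zm9v" encodes "foo".
    b, err := base64.StdEncoding.DecodeString("Zm9v")
    if err != nil {
        panic(err)
    }
    // BytesToAddress left-pads the value to 20 bytes and Hex() applies EIP-55 checksum casing.
    fmt.Println(common.BytesToAddress(b).Hex())
    // Output: 0x0000000000000000000000000000000000666F6f
}
```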
@@ -104,6 +104,8 @@ func ReadGrpcResponseBody(r io.Reader) ([]byte, ErrorJson) {
}

// HandleGrpcResponseError acts on an error that resulted from a grpc-gateway's response.
+// Whether there was an error is indicated by the bool return value. In case of an error,
+// there is no need to write to the response because it's taken care of by the function.
func HandleGrpcResponseError(errJson ErrorJson, resp *http.Response, respBody []byte, w http.ResponseWriter) (bool, ErrorJson) {
    responseHasError := false
    if err := json.Unmarshal(respBody, errJson); err != nil {
@@ -149,6 +151,10 @@ func ProcessMiddlewareResponseFields(responseContainer interface{}) ErrorJson {
        tag: "hex",
        f:   base64ToHexProcessor,
    },
+   {
+       tag: "address",
+       f:   base64ToChecksumAddressProcessor,
+   },
    {
        tag: "enum",
        f:   enumToLowercaseProcessor,
@@ -31,21 +31,25 @@ func defaultRequestContainer() *testRequestContainer {
}

type testResponseContainer struct {
-   TestString   string
-   TestHex      string `hex:"true"`
-   TestEmptyHex string `hex:"true"`
-   TestUint256  string `uint256:"true"`
-   TestEnum     string `enum:"true"`
-   TestTime     string `time:"true"`
+   TestString       string
+   TestHex          string `hex:"true"`
+   TestEmptyHex     string `hex:"true"`
+   TestAddress      string `address:"true"`
+   TestEmptyAddress string `address:"true"`
+   TestUint256      string `uint256:"true"`
+   TestEnum         string `enum:"true"`
+   TestTime         string `time:"true"`
}

func defaultResponseContainer() *testResponseContainer {
    return &testResponseContainer{
-       TestString:   "test string",
-       TestHex:      "Zm9v", // base64 encoding of "foo"
-       TestEmptyHex: "",
-       TestEnum:     "Test Enum",
-       TestTime:     "2006-01-02T15:04:05Z",
+       TestString:       "test string",
+       TestHex:          "Zm9v", // base64 encoding of "foo"
+       TestEmptyHex:     "",
+       TestAddress:      "Zm9v",
+       TestEmptyAddress: "",
+       TestEnum:         "Test Enum",
+       TestTime:         "2006-01-02T15:04:05Z",

        // base64 encoding of 4196 in little-endian
        TestUint256: "ZBAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAA=",
@@ -247,6 +251,8 @@ func TestProcessMiddlewareResponseFields(t *testing.T) {
    require.Equal(t, true, errJson == nil)
    assert.Equal(t, "0x666f6f", container.TestHex)
    assert.Equal(t, "0x", container.TestEmptyHex)
+   assert.Equal(t, "0x0000000000000000000000000000000000666F6f", container.TestAddress)
+   assert.Equal(t, "0x", container.TestEmptyAddress)
    assert.Equal(t, "4196", container.TestUint256)
    assert.Equal(t, "test enum", container.TestEnum)
    assert.Equal(t, "1136214245", container.TestTime)
@@ -292,7 +298,7 @@ func TestWriteMiddlewareResponseHeadersAndBody(t *testing.T) {
        v, ok = writer.Header()["Content-Length"]
        require.Equal(t, true, ok, "header not found")
        require.Equal(t, 1, len(v), "wrong number of header values")
-       assert.Equal(t, "181", v[0])
+       assert.Equal(t, "224", v[0])
        assert.Equal(t, 204, writer.Code)
        assert.DeepEqual(t, responseJson, writer.Body.Bytes())
    })
api/gateway/modifiers.go — new file, 30 lines
@@ -0,0 +1,30 @@
+package gateway
+
+import (
+   "context"
+   "net/http"
+   "strconv"
+
+   gwruntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
+   "google.golang.org/protobuf/proto"
+)
+
+func HttpResponseModifier(ctx context.Context, w http.ResponseWriter, resp proto.Message) error {
+   md, ok := gwruntime.ServerMetadataFromContext(ctx)
+   if !ok {
+       return nil
+   }
+   // set http status code
+   if vals := md.HeaderMD.Get("x-http-code"); len(vals) > 0 {
+       code, err := strconv.Atoi(vals[0])
+       if err != nil {
+           return err
+       }
+       // delete the headers to not expose any grpc-metadata in http response
+       delete(md.HeaderMD, "x-http-code")
+       delete(w.Header(), "Grpc-Metadata-X-Http-Code")
+       w.WriteHeader(code)
+   }
+
+   return nil
+}
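The modifier only acts when the gRPC response metadata carries an x-http-code value. A minimal sketch of how it would typically be wired up with grpc-gateway v2 — the helper names below are illustrative and not part of this diff:

```go
package gateway

import (
    "context"

    gwruntime "github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
    "google.golang.org/grpc"
    "google.golang.org/grpc/metadata"
)

// newMuxWithModifier shows the assumed registration point: the modifier runs as a
// forward-response option on every gateway response.
func newMuxWithModifier() *gwruntime.ServeMux {
    return gwruntime.NewServeMux(
        gwruntime.WithForwardResponseOption(HttpResponseModifier),
    )
}

// requestHTTPStatus is a hypothetical helper a gRPC handler could call; the modifier
// above converts the metadata value into w.WriteHeader(code) and strips the header.
func requestHTTPStatus(ctx context.Context, code string) error {
    return grpc.SetHeader(ctx, metadata.Pairs("x-http-code", code))
}
```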
@@ -37,8 +37,8 @@ func prepareForkchoiceState(
    blockRoot [32]byte,
    parentRoot [32]byte,
    payloadHash [32]byte,
-   justifiedEpoch types.Epoch,
-   finalizedEpoch types.Epoch,
+   justified *ethpb.Checkpoint,
+   finalized *ethpb.Checkpoint,
) (state.BeaconState, [32]byte, error) {
    blockHeader := &ethpb.BeaconBlockHeader{
        ParentRoot: parentRoot[:],
@@ -48,20 +48,12 @@ func prepareForkchoiceState(
        BlockHash: payloadHash[:],
    }

-   justifiedCheckpoint := &ethpb.Checkpoint{
-       Epoch: justifiedEpoch,
-   }
-
-   finalizedCheckpoint := &ethpb.Checkpoint{
-       Epoch: finalizedEpoch,
-   }
-
    base := &ethpb.BeaconStateBellatrix{
        Slot: slot,
        RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
        BlockRoots: make([][]byte, 1),
-       CurrentJustifiedCheckpoint: justifiedCheckpoint,
-       FinalizedCheckpoint: finalizedCheckpoint,
+       CurrentJustifiedCheckpoint: justified,
+       FinalizedCheckpoint: finalized,
        LatestExecutionPayloadHeader: executionHeader,
        LatestBlockHeader: blockHeader,
    }
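Most of the remaining hunks in this compare are the mechanical fallout of that signature change: every test caller now builds the justified and finalized checkpoints itself. A representative sketch of the updated pattern, using the same names and values as the tests below (the test name is illustrative, and the snippet assumes the blockchain package's existing test imports):

```go
func TestPrepareForkchoiceState_WithCheckpoints(t *testing.T) {
    ctx := context.Background()
    c := &Service{cfg: &config{ForkChoiceStore: protoarray.New()}}
    // Callers construct the checkpoints themselves instead of passing bare epochs.
    ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
    ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
    st, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, st, blkRoot))
}
```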
@@ -328,19 +320,21 @@ func TestService_HeadGenesisValidatorsRoot(t *testing.T) {
func TestService_ChainHeads_ProtoArray(t *testing.T) {
    ctx := context.Background()
    c := &Service{cfg: &config{ForkChoiceStore: protoarray.New()}}
-   state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
+   ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+   ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+   state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
-   state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0)
+   state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
-   state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 0, 0)
+   state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
-   state, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0)
+   state, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
-   state, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 0, 0)
+   state, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))

@@ -358,22 +352,24 @@ func TestService_ChainHeads_ProtoArray(t *testing.T) {
func TestService_ChainHeads_DoublyLinkedTree(t *testing.T) {
    ctx := context.Background()
    c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}}
-   state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
+   ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+   ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+   state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
-   state, blkRoot, err = prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
+   state, blkRoot, err = prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
-   state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0)
+   state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
-   state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 0, 0)
+   state, blkRoot, err = prepareForkchoiceState(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
-   state, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
+   state, blkRoot, err = prepareForkchoiceState(ctx, 103, [32]byte{'d'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
-   state, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 0, 0)
+   state, blkRoot, err = prepareForkchoiceState(ctx, 104, [32]byte{'e'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))

@@ -453,10 +449,12 @@ func TestService_IsOptimistic_ProtoArray(t *testing.T) {

    ctx := context.Background()
    c := &Service{cfg: &config{ForkChoiceStore: protoarray.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}
-   state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
+   ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+   ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+   state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
-   state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0)
+   state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))

@@ -472,11 +470,13 @@ func TestService_IsOptimistic_DoublyLinkedTree(t *testing.T) {
    params.OverrideBeaconConfig(cfg)

    ctx := context.Background()
+   ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+   ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
    c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}
-   state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
+   state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
-   state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0)
+   state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))

@@ -496,10 +496,12 @@ func TestService_IsOptimisticBeforeBellatrix(t *testing.T) {
func TestService_IsOptimisticForRoot_ProtoArray(t *testing.T) {
    ctx := context.Background()
    c := &Service{cfg: &config{ForkChoiceStore: protoarray.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}
-   state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
+   ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+   ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+   state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
-   state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0)
+   state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))

@@ -511,10 +513,12 @@ func TestService_IsOptimisticForRoot_ProtoArray(t *testing.T) {
func TestService_IsOptimisticForRoot_DoublyLinkedTree(t *testing.T) {
    ctx := context.Background()
    c := &Service{cfg: &config{ForkChoiceStore: doublylinkedtree.New()}, head: &head{slot: 101, root: [32]byte{'b'}}}
-   state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
+   ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+   ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+   state, blkRoot, err := prepareForkchoiceState(ctx, 100, [32]byte{'a'}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
-   state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0)
+   state, blkRoot, err = prepareForkchoiceState(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, c.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
@@ -90,7 +90,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, arg *notifyForkcho
        return nil, err
    }

-   r, err := s.updateHead(ctx, s.justifiedBalances.balances)
+   r, err := s.cfg.ForkChoiceStore.Head(ctx, s.justifiedBalances.balances)
    if err != nil {
        return nil, err
    }

@@ -57,13 +57,15 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
        state: st,
    }
    require.NoError(t, err)
-   state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
+   ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+   ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+   state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
-   state, blkRoot, err = prepareForkchoiceState(ctx, 1, altairBlkRoot, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
+   state, blkRoot, err = prepareForkchoiceState(ctx, 1, altairBlkRoot, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
-   state, blkRoot, err = prepareForkchoiceState(ctx, 2, bellatrixBlkRoot, altairBlkRoot, params.BeaconConfig().ZeroHash, 0, 0)
+   state, blkRoot, err = prepareForkchoiceState(ctx, 2, bellatrixBlkRoot, altairBlkRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))

@@ -300,25 +302,27 @@ func Test_NotifyForkchoiceUpdateRecursive(t *testing.T) {
    service, err := NewService(ctx, opts...)
    service.justifiedBalances.balances = []uint64{50, 100, 200}
    require.NoError(t, err)
-   state, blkRoot, err := prepareForkchoiceState(ctx, 1, bra, [32]byte{}, [32]byte{'A'}, 0, 0)
+   ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+   ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+   state, blkRoot, err := prepareForkchoiceState(ctx, 1, bra, [32]byte{}, [32]byte{'A'}, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
-   state, blkRoot, err = prepareForkchoiceState(ctx, 2, brb, bra, [32]byte{'B'}, 0, 0)
+   state, blkRoot, err = prepareForkchoiceState(ctx, 2, brb, bra, [32]byte{'B'}, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
-   state, blkRoot, err = prepareForkchoiceState(ctx, 3, brc, brb, [32]byte{'C'}, 0, 0)
+   state, blkRoot, err = prepareForkchoiceState(ctx, 3, brc, brb, [32]byte{'C'}, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
-   state, blkRoot, err = prepareForkchoiceState(ctx, 4, brd, brc, [32]byte{'D'}, 0, 0)
+   state, blkRoot, err = prepareForkchoiceState(ctx, 4, brd, brc, [32]byte{'D'}, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
-   state, blkRoot, err = prepareForkchoiceState(ctx, 5, bre, brb, [32]byte{'E'}, 0, 0)
+   state, blkRoot, err = prepareForkchoiceState(ctx, 5, bre, brb, [32]byte{'E'}, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
-   state, blkRoot, err = prepareForkchoiceState(ctx, 6, brf, bre, [32]byte{'F'}, 0, 0)
+   state, blkRoot, err = prepareForkchoiceState(ctx, 6, brf, bre, [32]byte{'F'}, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
-   state, blkRoot, err = prepareForkchoiceState(ctx, 7, brg, bre, [32]byte{'G'}, 0, 0)
+   state, blkRoot, err = prepareForkchoiceState(ctx, 7, brg, bre, [32]byte{'G'}, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
@@ -405,12 +409,16 @@ func Test_NotifyNewPayload(t *testing.T) {
    require.NoError(t, err)
    service, err := NewService(ctx, opts...)
    require.NoError(t, err)
+   st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
+   service.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
    r, err := bellatrixBlk.Block().HashTreeRoot()
    require.NoError(t, err)
-   state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
+   ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+   ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+   state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
-   state, blkRoot, err = prepareForkchoiceState(ctx, 1, r, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
+   state, blkRoot, err = prepareForkchoiceState(ctx, 1, r, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))

@@ -559,7 +567,7 @@ func Test_NotifyNewPayload(t *testing.T) {
    }
    service.cfg.ExecutionEngineCaller = e
    root := [32]byte{'a'}
-   state, blkRoot, err := prepareForkchoiceState(ctx, 0, root, root, params.BeaconConfig().ZeroHash, 0, 0)
+   state, blkRoot, err := prepareForkchoiceState(ctx, 0, root, root, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
    postVersion, postHeader, err := getStateVersionAndPayload(tt.postState)

@@ -830,7 +838,7 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
    ctx := context.Background()
    beaconDB := testDB.SetupDB(t)
    stateGen := stategen.New(beaconDB)
-   fcs := protoarray.New()
+   fcs := doublylinkedtree.New()
    opts := []Option{
        WithDatabase(beaconDB),
        WithStateGen(stateGen),

@@ -846,9 +854,15 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
    genesisRoot, err := genesisBlk.Block.HashTreeRoot()
    require.NoError(t, err)
    assert.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
-   state, blkRoot, err := prepareForkchoiceState(ctx, 0, genesisRoot, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0)
+   ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+   ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+   fjc := &forkchoicetypes.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash}
+   require.NoError(t, fcs.UpdateJustifiedCheckpoint(fjc))
+   require.NoError(t, fcs.UpdateFinalizedCheckpoint(fjc))
+   state, blkRoot, err := prepareForkchoiceState(ctx, 0, genesisRoot, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
+   fcs.SetOriginRoot(genesisRoot)
    genesisSummary := &ethpb.StateSummary{
        Root: genesisStateRoot[:],
        Slot: 0,

@@ -879,7 +893,9 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
        Slot: 320,
    }
    require.NoError(t, beaconDB.SaveStateSummary(ctx, opStateSummary))
-   state, blkRoot, err = prepareForkchoiceState(ctx, 320, opRoot, genesisRoot, params.BeaconConfig().ZeroHash, 10, 10)
+   tenjc := &ethpb.Checkpoint{Epoch: 10, Root: genesisRoot[:]}
+   tenfc := &ethpb.Checkpoint{Epoch: 10, Root: genesisRoot[:]}
+   state, blkRoot, err = prepareForkchoiceState(ctx, 320, opRoot, genesisRoot, params.BeaconConfig().ZeroHash, tenjc, tenfc)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
    assert.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, opRoot))

@@ -908,7 +924,9 @@ func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
        Slot: 640,
    }
    require.NoError(t, beaconDB.SaveStateSummary(ctx, validSummary))
-   state, blkRoot, err = prepareForkchoiceState(ctx, 640, validRoot, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 20, 20)
+   twentyjc := &ethpb.Checkpoint{Epoch: 20, Root: validRoot[:]}
+   twentyfc := &ethpb.Checkpoint{Epoch: 20, Root: validRoot[:]}
+   state, blkRoot, err = prepareForkchoiceState(ctx, 640, validRoot, genesisRoot, params.BeaconConfig().ZeroHash, twentyjc, twentyfc)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
    require.NoError(t, fcs.SetOptimisticToValid(ctx, validRoot))
@@ -10,11 +10,7 @@ import (
    statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
-   doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
-   "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
-   forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
    "github.com/prysmaticlabs/prysm/beacon-chain/state"
-   "github.com/prysmaticlabs/prysm/config/features"
    fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
    "github.com/prysmaticlabs/prysm/config/params"
    "github.com/prysmaticlabs/prysm/consensus-types/interfaces"

@@ -40,7 +36,7 @@ func (s *Service) UpdateAndSaveHeadWithBalances(ctx context.Context) error {
        msg := fmt.Sprintf("could not read balances for state w/ justified checkpoint %#x", jp.Root)
        return errors.Wrap(err, msg)
    }
-   headRoot, err := s.updateHead(ctx, balances)
+   headRoot, err := s.cfg.ForkChoiceStore.Head(ctx, balances)
    if err != nil {
        return errors.Wrap(err, "could not update head")
    }
@@ -63,58 +59,6 @@ type head struct {
    state state.BeaconState // current head state.
}

-// Determined the head from the fork choice service and saves its new data
-// (head root, head block, and head state) to the local service cache.
-func (s *Service) updateHead(ctx context.Context, balances []uint64) ([32]byte, error) {
-   ctx, span := trace.StartSpan(ctx, "blockChain.updateHead")
-   defer span.End()
-
-   // Get head from the fork choice service.
-   f, err := s.store.FinalizedCheckpt()
-   if err != nil {
-       return [32]byte{}, errors.Wrap(err, "could not get finalized checkpoint")
-   }
-   j, err := s.store.JustifiedCheckpt()
-   if err != nil {
-       return [32]byte{}, errors.Wrap(err, "could not get justified checkpoint")
-   }
-   // To get head before the first justified epoch, the fork choice will start with origin root
-   // instead of zero hashes.
-   headStartRoot := s.ensureRootNotZeros(bytesutil.ToBytes32(j.Root))
-
-   // In order to process head, fork choice store requires justified info.
-   // If the fork choice store is missing justified block info, a node should
-   // re-initiate fork choice store using the latest justified info.
-   // This recovers a fatal condition and should not happen in run time.
-   if !s.cfg.ForkChoiceStore.HasNode(headStartRoot) {
-       jb, err := s.getBlock(ctx, headStartRoot)
-       if err != nil {
-           return [32]byte{}, err
-       }
-       st, err := s.cfg.StateGen.StateByRoot(ctx, s.ensureRootNotZeros(headStartRoot))
-       if err != nil {
-           return [32]byte{}, err
-       }
-       if features.Get().EnableForkChoiceDoublyLinkedTree {
-           s.cfg.ForkChoiceStore = doublylinkedtree.New()
-       } else {
-           s.cfg.ForkChoiceStore = protoarray.New()
-       }
-       if err := s.insertBlockToForkChoiceStore(ctx, jb.Block(), headStartRoot, st, f, j); err != nil {
-           return [32]byte{}, err
-       }
-   }
-   jc := &forkchoicetypes.Checkpoint{Epoch: j.Epoch, Root: headStartRoot}
-   fc := &forkchoicetypes.Checkpoint{Epoch: f.Epoch, Root: s.ensureRootNotZeros(bytesutil.ToBytes32(f.Root))}
-   if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(jc); err != nil {
-       return [32]byte{}, err
-   }
-   if err := s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(fc); err != nil {
-       return [32]byte{}, err
-   }
-   return s.cfg.ForkChoiceStore.Head(ctx, balances)
-}
-
// This saves head info to the local service cache, it also saves the
// new head root to the DB.
func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock interfaces.SignedBeaconBlock, headState state.BeaconState) error {
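With updateHead deleted, the call sites touched in this compare (notifyForkchoiceUpdate, UpdateAndSaveHeadWithBalances, onBlock, UpdateHead, and the tests) query the fork-choice store directly; the justified/finalized bookkeeping that updateHead re-derived on every call is now expected to be kept current as checkpoints and nodes are inserted. A condensed sketch of the before/after, taken from the surrounding hunks:

```go
// Before: the service recomputed justified/finalized info and repaired the store.
// headRoot, err := s.updateHead(ctx, balances)

// After: the fork-choice store is the single source of truth for head selection.
headRoot, err := s.cfg.ForkChoiceStore.Head(ctx, balances)
if err != nil {
    return errors.Wrap(err, "could not update head")
}
```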
@@ -52,7 +52,9 @@ func TestSaveHead_Different(t *testing.T) {
    require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), oldBlock))
    oldRoot, err := oldBlock.Block().HashTreeRoot()
    require.NoError(t, err)
-   state, blkRoot, err := prepareForkchoiceState(ctx, oldBlock.Block().Slot(), oldRoot, bytesutil.ToBytes32(oldBlock.Block().ParentRoot()), [32]byte{}, 0, 0)
+   ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+   ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+   state, blkRoot, err := prepareForkchoiceState(ctx, oldBlock.Block().Slot(), oldRoot, bytesutil.ToBytes32(oldBlock.Block().ParentRoot()), [32]byte{}, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
    service.head = &head{

@@ -70,7 +72,7 @@ func TestSaveHead_Different(t *testing.T) {
    require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wsb))
    newRoot, err := newHeadBlock.HashTreeRoot()
    require.NoError(t, err)
-   state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot(), newRoot, bytesutil.ToBytes32(wsb.Block().ParentRoot()), [32]byte{}, 0, 0)
+   state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot(), newRoot, bytesutil.ToBytes32(wsb.Block().ParentRoot()), [32]byte{}, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
    headState, err := util.NewBeaconState()

@@ -102,7 +104,9 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
    require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), oldBlock))
    oldRoot, err := oldBlock.Block().HashTreeRoot()
    require.NoError(t, err)
-   state, blkRoot, err := prepareForkchoiceState(ctx, oldBlock.Block().Slot(), oldRoot, bytesutil.ToBytes32(oldBlock.Block().ParentRoot()), [32]byte{}, 0, 0)
+   ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+   ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+   state, blkRoot, err := prepareForkchoiceState(ctx, oldBlock.Block().Slot(), oldRoot, bytesutil.ToBytes32(oldBlock.Block().ParentRoot()), [32]byte{}, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
    service.head = &head{

@@ -122,7 +126,7 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
    require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wsb))
    newRoot, err := newHeadBlock.HashTreeRoot()
    require.NoError(t, err)
-   state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot(), newRoot, bytesutil.ToBytes32(wsb.Block().ParentRoot()), [32]byte{}, 0, 0)
+   state, blkRoot, err = prepareForkchoiceState(ctx, wsb.Block().Slot(), newRoot, bytesutil.ToBytes32(wsb.Block().ParentRoot()), [32]byte{}, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
    headState, err := util.NewBeaconState()
@@ -158,26 +162,6 @@ func TestCacheJustifiedStateBalances_CanCache(t *testing.T) {
    require.DeepEqual(t, balances, state.Balances(), "Incorrect justified balances")
}

-func TestUpdateHead_MissingJustifiedRoot(t *testing.T) {
-   beaconDB := testDB.SetupDB(t)
-   service := setupBeaconChain(t, beaconDB)
-
-   b := util.NewBeaconBlock()
-   wsb, err := wrapper.WrappedSignedBeaconBlock(b)
-   require.NoError(t, err)
-   require.NoError(t, service.cfg.BeaconDB.SaveBlock(context.Background(), wsb))
-   r, err := b.Block.HashTreeRoot()
-   require.NoError(t, err)
-   state, _ := util.DeterministicGenesisState(t, 1)
-   require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), state, r))
-
-   service.store.SetJustifiedCheckptAndPayloadHash(&ethpb.Checkpoint{Root: r[:]}, [32]byte{'a'})
-   service.store.SetFinalizedCheckptAndPayloadHash(&ethpb.Checkpoint{}, [32]byte{'b'})
-   service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{})
-   _, err = service.updateHead(context.Background(), []uint64{})
-   require.NoError(t, err)
-}
-
func Test_notifyNewHeadEvent(t *testing.T) {
    t.Run("genesis_state_root", func(t *testing.T) {
        bState, _ := util.DeterministicGenesisState(t, 10)

@@ -288,7 +272,9 @@ func TestSaveOrphanedAtts_NoCommonAncestor(t *testing.T) {
    for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk3, blk4} {
        r, err := blk.Block.HashTreeRoot()
        require.NoError(t, err)
-       state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, 0, 0)
+       ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+       ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+       state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
        require.NoError(t, err)
        require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
        b, err := wrapper.WrappedSignedBeaconBlock(blk)

@@ -341,11 +327,13 @@ func TestSaveOrphanedAtts(t *testing.T) {
    blk4.Block.ParentRoot = rG[:]
    r4, err := blk4.Block.HashTreeRoot()
    require.NoError(t, err)
+   ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+   ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}

    for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk3, blk4} {
        r, err := blk.Block.HashTreeRoot()
        require.NoError(t, err)
-       state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, 0, 0)
+       state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
        require.NoError(t, err)
        require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
        b, err := wrapper.WrappedSignedBeaconBlock(blk)

@@ -402,11 +390,13 @@ func TestSaveOrphanedAtts_CanFilter(t *testing.T) {
    blk4.Block.ParentRoot = rG[:]
    r4, err := blk4.Block.HashTreeRoot()
    require.NoError(t, err)
+   ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+   ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}

    for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk4} {
        r, err := blk.Block.HashTreeRoot()
        require.NoError(t, err)
-       state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, 0, 0)
+       state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
        require.NoError(t, err)
        require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
        b, err := wrapper.WrappedSignedBeaconBlock(blk)

@@ -464,10 +454,12 @@ func TestSaveOrphanedAtts_NoCommonAncestor_DoublyLinkedTrie(t *testing.T) {
    r4, err := blk4.Block.HashTreeRoot()
    require.NoError(t, err)

+   ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+   ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
    for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk3, blk4} {
        r, err := blk.Block.HashTreeRoot()
        require.NoError(t, err)
-       state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, 0, 0)
+       state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
        require.NoError(t, err)
        require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
        b, err := wrapper.WrappedSignedBeaconBlock(blk)

@@ -526,10 +518,12 @@ func TestSaveOrphanedAtts_DoublyLinkedTrie(t *testing.T) {
    r4, err := blk4.Block.HashTreeRoot()
    require.NoError(t, err)

+   ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+   ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
    for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk3, blk4} {
        r, err := blk.Block.HashTreeRoot()
        require.NoError(t, err)
-       state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, 0, 0)
+       state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
        require.NoError(t, err)
        require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
        b, err := wrapper.WrappedSignedBeaconBlock(blk)

@@ -592,10 +586,12 @@ func TestSaveOrphanedAtts_CanFilter_DoublyLinkedTrie(t *testing.T) {
    r4, err := blk4.Block.HashTreeRoot()
    require.NoError(t, err)

+   ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+   ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
    for _, blk := range []*ethpb.SignedBeaconBlock{blkG, blk1, blk2, blk4} {
        r, err := blk.Block.HashTreeRoot()
        require.NoError(t, err)
-       state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, 0, 0)
+       state, blkRoot, err := prepareForkchoiceState(ctx, blk.Block.Slot, r, bytesutil.ToBytes32(blk.Block.ParentRoot), [32]byte{}, ojc, ofc)
        require.NoError(t, err)
        require.NoError(t, service.ForkChoicer().InsertNode(ctx, state, blkRoot))
        b, err := wrapper.WrappedSignedBeaconBlock(blk)
@@ -620,6 +616,10 @@ func TestUpdateHead_noSavedChanges(t *testing.T) {

    service, err := NewService(ctx, opts...)
    require.NoError(t, err)
+   ojp := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+   st, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, [32]byte{}, ojp, ojp)
+   require.NoError(t, err)
+   require.NoError(t, fcs.InsertNode(ctx, st, blkRoot))

    bellatrixBlk, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlockBellatrix())
    require.NoError(t, err)

@@ -641,10 +641,10 @@ func TestUpdateHead_noSavedChanges(t *testing.T) {
    headRoot := service.headRoot()
    require.Equal(t, [32]byte{}, headRoot)

-   st, blkRoot, err := prepareForkchoiceState(ctx, 0, bellatrixBlkRoot, [32]byte{}, [32]byte{}, 0, 0)
+   st, blkRoot, err = prepareForkchoiceState(ctx, 0, bellatrixBlkRoot, [32]byte{}, [32]byte{}, fcp, fcp)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, st, blkRoot))
-   newRoot, err := service.updateHead(ctx, []uint64{1, 2})
+   newRoot, err := service.cfg.ForkChoiceStore.Head(ctx, []uint64{1, 2})
    require.NoError(t, err)
    require.NotEqual(t, headRoot, newRoot)
    require.Equal(t, headRoot, service.headRoot())

@@ -35,20 +35,22 @@ func TestService_newSlot(t *testing.T) {
    assert.NoError(t, beaconDB.SaveBlock(ctx, wsb))
    bj, err := genesis.Block.HashTreeRoot()
    require.NoError(t, err)
+   ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+   ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}

-   state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, [32]byte{}, 0, 0)
+   state, blkRoot, err := prepareForkchoiceState(ctx, 0, [32]byte{}, [32]byte{}, [32]byte{}, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot)) // genesis
-   state, blkRoot, err = prepareForkchoiceState(ctx, 32, [32]byte{'a'}, [32]byte{}, [32]byte{}, 0, 0)
+   state, blkRoot, err = prepareForkchoiceState(ctx, 32, [32]byte{'a'}, [32]byte{}, [32]byte{}, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot)) // finalized
-   state, blkRoot, err = prepareForkchoiceState(ctx, 64, [32]byte{'b'}, [32]byte{'a'}, [32]byte{}, 0, 0)
+   state, blkRoot, err = prepareForkchoiceState(ctx, 64, [32]byte{'b'}, [32]byte{'a'}, [32]byte{}, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot)) // justified
-   state, blkRoot, err = prepareForkchoiceState(ctx, 96, bj, [32]byte{'a'}, [32]byte{}, 0, 0)
+   state, blkRoot, err = prepareForkchoiceState(ctx, 96, bj, [32]byte{'a'}, [32]byte{}, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot)) // best justified
-   state, blkRoot, err = prepareForkchoiceState(ctx, 97, [32]byte{'d'}, [32]byte{}, [32]byte{}, 0, 0)
+   state, blkRoot, err = prepareForkchoiceState(ctx, 97, [32]byte{'d'}, [32]byte{}, [32]byte{}, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, fcs.InsertNode(ctx, state, blkRoot)) // bad

@@ -269,7 +269,9 @@ func TestStore_OnAttestation_Ok_ProtoArray(t *testing.T) {
    copied, err = transition.ProcessSlots(ctx, copied, 1)
    require.NoError(t, err)
    require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
-   state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, 1, 1)
+   ojc := &ethpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]}
+   ofc := &ethpb.Checkpoint{Epoch: 1, Root: params.BeaconConfig().ZeroHash[:]}
+   state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
    require.NoError(t, service.OnAttestation(ctx, att[0]))

@@ -297,7 +299,9 @@ func TestStore_OnAttestation_Ok_DoublyLinkedTree(t *testing.T) {
    copied, err = transition.ProcessSlots(ctx, copied, 1)
    require.NoError(t, err)
    require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
-   state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, 1, 1)
+   ojc := &ethpb.Checkpoint{Epoch: 0, Root: tRoot[:]}
+   ofc := &ethpb.Checkpoint{Epoch: 0, Root: tRoot[:]}
+   state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
    require.NoError(t, service.OnAttestation(ctx, att[0]))

@@ -604,10 +608,12 @@ func TestVerifyFinalizedConsistency_IsCanonical(t *testing.T) {
    r33, err := b33.Block.HashTreeRoot()
    require.NoError(t, err)

-   state, blkRoot, err := prepareForkchoiceState(ctx, b32.Block.Slot, r32, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0)
+   ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+   ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+   state, blkRoot, err := prepareForkchoiceState(ctx, b32.Block.Slot, r32, [32]byte{}, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
-   state, blkRoot, err = prepareForkchoiceState(ctx, b33.Block.Slot, r33, r32, params.BeaconConfig().ZeroHash, 0, 0)
+   state, blkRoot, err = prepareForkchoiceState(ctx, b33.Block.Slot, r33, r32, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
@@ -221,15 +221,6 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
            return err
        }
        s.store.SetJustifiedCheckptAndPayloadHash(postState.CurrentJustifiedCheckpoint(), h)
-       // Update Forkchoice checkpoints
-       if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(&forkchoicetypes.Checkpoint{
-           Epoch: psj.Epoch, Root: bytesutil.ToBytes32(psj.Root)}); err != nil {
-           return err
-       }
-       if err := s.cfg.ForkChoiceStore.UpdateFinalizedCheckpoint(&forkchoicetypes.Checkpoint{
-           Epoch: psf.Epoch, Root: bytesutil.ToBytes32(psf.Root)}); err != nil {
-           return err
-       }
    }

    balances, err := s.justifiedBalances.get(ctx, bytesutil.ToBytes32(justified.Root))
@@ -237,7 +228,7 @@ func (s *Service) onBlock(ctx context.Context, signed interfaces.SignedBeaconBlo
        msg := fmt.Sprintf("could not read balances for state w/ justified checkpoint %#x", justified.Root)
        return errors.Wrap(err, msg)
    }
-   headRoot, err := s.updateHead(ctx, balances)
+   headRoot, err := s.cfg.ForkChoiceStore.Head(ctx, balances)
    if err != nil {
        log.WithError(err).Warn("Could not update head")
    }

@@ -217,13 +217,7 @@ func (s *Service) updateJustified(ctx context.Context, state state.ReadOnlyBeaco
            return err
        }
        s.store.SetJustifiedCheckptAndPayloadHash(cpt, h)
-       // Update forkchoice's justified checkpoint
-       if err := s.cfg.ForkChoiceStore.UpdateJustifiedCheckpoint(&forkchoicetypes.Checkpoint{
-           Epoch: cpt.Epoch, Root: bytesutil.ToBytes32(cpt.Root)}); err != nil {
-           return err
-       }
    }

    return nil
}

@@ -321,7 +315,8 @@ func (s *Service) ancestorByForkChoiceStore(ctx context.Context, r [32]byte, slo
    if !s.cfg.ForkChoiceStore.HasParent(r) {
        return nil, errors.New("could not find root in fork choice store")
    }
-   return s.cfg.ForkChoiceStore.AncestorRoot(ctx, r, slot)
+   root, err := s.cfg.ForkChoiceStore.AncestorRoot(ctx, r, slot)
+   return root[:], err
}

// This retrieves an ancestor root using DB. The look up is recursively looking up DB. Slower than `ancestorByForkChoiceStore`.

@@ -1341,13 +1341,15 @@ func TestAncestor_CanUseForkchoice(t *testing.T) {
    b200.Block.ParentRoot = r100[:]
    r200, err := b200.Block.HashTreeRoot()
    require.NoError(t, err)
+   ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+   ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
    for _, b := range []*ethpb.SignedBeaconBlock{b1, b100, b200} {
        beaconBlock := util.NewBeaconBlock()
        beaconBlock.Block.Slot = b.Block.Slot
        beaconBlock.Block.ParentRoot = bytesutil.PadTo(b.Block.ParentRoot, 32)
        r, err := b.Block.HashTreeRoot()
        require.NoError(t, err)
-       state, blkRoot, err := prepareForkchoiceState(context.Background(), b.Block.Slot, r, bytesutil.ToBytes32(b.Block.ParentRoot), params.BeaconConfig().ZeroHash, 0, 0)
+       state, blkRoot, err := prepareForkchoiceState(context.Background(), b.Block.Slot, r, bytesutil.ToBytes32(b.Block.ParentRoot), params.BeaconConfig().ZeroHash, ojc, ofc)
        require.NoError(t, err)
        require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
    }
@@ -1387,6 +1389,8 @@ func TestAncestor_CanUseDB(t *testing.T) {
    b200.Block.ParentRoot = r100[:]
    r200, err := b200.Block.HashTreeRoot()
    require.NoError(t, err)
+   ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+   ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
    for _, b := range []*ethpb.SignedBeaconBlock{b1, b100, b200} {
        beaconBlock := util.NewBeaconBlock()
        beaconBlock.Block.Slot = b.Block.Slot
@@ -1396,7 +1400,7 @@ func TestAncestor_CanUseDB(t *testing.T) {
        require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb)) // Saves blocks to DB.
    }

-   state, blkRoot, err := prepareForkchoiceState(context.Background(), 200, r200, r200, params.BeaconConfig().ZeroHash, 0, 0)
+   state, blkRoot, err := prepareForkchoiceState(context.Background(), 200, r200, r200, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
@@ -160,7 +160,7 @@ func (s *Service) UpdateHead(ctx context.Context) error {
    if err != nil {
        return err
    }
-   newHeadRoot, err := s.updateHead(ctx, balances)
+   newHeadRoot, err := s.cfg.ForkChoiceStore.Head(ctx, balances)
    if err != nil {
        log.WithError(err).Warn("Resolving fork due to new attestation")
    }

@@ -9,6 +9,7 @@ import (
    "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
    testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
+   "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
    "github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
    "github.com/prysmaticlabs/prysm/config/params"
    types "github.com/prysmaticlabs/prysm/consensus-types/primitives"

@@ -120,7 +121,9 @@ func TestProcessAttestations_Ok(t *testing.T) {
    copied, err = transition.ProcessSlots(ctx, copied, 1)
    require.NoError(t, err)
    require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
-   state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, 1, 1)
+   ofc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+   ojc := &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
+   state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, ojc, ofc)
    require.NoError(t, err)
    require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
    require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(atts))

@@ -211,38 +214,58 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
func TestService_ProcessAttestationsAndUpdateHead(t *testing.T) {
    ctx := context.Background()
    opts := testServiceOptsWithDB(t)
    opts = append(opts, WithAttestationPool(attestations.NewPool()), WithStateNotifier(&mockBeaconNode{}))
    fcs := protoarray.New()
    opts = append(opts,
        WithAttestationPool(attestations.NewPool()),
        WithStateNotifier(&mockBeaconNode{}),
        WithForkChoiceStore(fcs),
    )

    service, err := NewService(ctx, opts...)
    require.NoError(t, err)
    service.genesisTime = prysmTime.Now().Add(-1 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
    service.genesisTime = prysmTime.Now().Add(-2 * time.Duration(params.BeaconConfig().SecondsPerSlot) * time.Second)
    genesisState, pks := util.DeterministicGenesisState(t, 64)
    require.NoError(t, genesisState.SetGenesisTime(uint64(prysmTime.Now().Unix())-params.BeaconConfig().SecondsPerSlot))
    require.NoError(t, service.saveGenesisData(ctx, genesisState))
    atts, err := util.GenerateAttestations(genesisState, pks, 1, 0, false)
    require.NoError(t, err)
    tRoot := bytesutil.ToBytes32(atts[0].Data.Target.Root)
    copied := genesisState.Copy()
    copied, err = transition.ProcessSlots(ctx, copied, 1)
    // Generate a new block for attesters to attest
    blk, err := util.GenerateFullBlock(copied, pks, util.DefaultBlockGenConfig(), 1)
    require.NoError(t, err)
    require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, copied, tRoot))
    require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{Root: tRoot[:]}))
    state, blkRoot, err := prepareForkchoiceState(ctx, 0, tRoot, tRoot, params.BeaconConfig().ZeroHash, 1, 1)
    tRoot, err := blk.Block.HashTreeRoot()
    require.NoError(t, err)
    wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
    require.NoError(t, err)
    require.NoError(t, service.onBlock(ctx, wsb, tRoot))
    copied, err = service.cfg.StateGen.StateByRoot(ctx, tRoot)
    require.NoError(t, err)
    require.Equal(t, 2, fcs.NodeCount())
    require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))

    // Generate attestatios for this block in Slot 1
    atts, err := util.GenerateAttestations(copied, pks, 1, 1, false)
    require.NoError(t, err)
    require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
    require.NoError(t, service.cfg.AttPool.SaveForkchoiceAttestations(atts))
    b := util.NewBeaconBlock()
    wb, err := wrapper.WrappedSignedBeaconBlock(b)
    // Verify the target is in forchoice
    require.Equal(t, true, fcs.HasNode(bytesutil.ToBytes32(atts[0].Data.BeaconBlockRoot)))

    // Insert a new block to forkchoice
    ojc := &ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]}
    b, err := util.GenerateFullBlock(genesisState, pks, util.DefaultBlockGenConfig(), 2)
    require.NoError(t, err)
    b.Block.ParentRoot = service.originBlockRoot[:]
    r, err := b.Block.HashTreeRoot()
    require.NoError(t, err)
    wb, err := wrapper.WrappedSignedBeaconBlock(b)
    require.NoError(t, err)
    require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wb))
    state, blkRoot, err = prepareForkchoiceState(ctx, wb.Block().Slot(), r, bytesutil.ToBytes32(wb.Block().ParentRoot()), [32]byte{}, 0, 0)
    state, blkRoot, err := prepareForkchoiceState(ctx, 2, r, service.originBlockRoot, [32]byte{'b'}, ojc, ojc)
    require.NoError(t, err)
    require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
    require.Equal(t, 3, fcs.NodeCount())
    service.head.root = r // Old head

    require.Equal(t, 1, len(service.cfg.AttPool.ForkchoiceAttestations()))
    require.NoError(t, err, service.UpdateHead(ctx))
    require.Equal(t, tRoot, service.head.root) // Validate head is the new one

    require.Equal(t, 0, len(service.cfg.AttPool.ForkchoiceAttestations())) // Validate att pool is empty
    require.Equal(t, tRoot, service.head.root) // Validate head is the new one
}
@@ -322,6 +322,8 @@ func TestCheckSaveHotStateDB_Disabling(t *testing.T) {
|
||||
opts := testServiceOptsWithDB(t)
|
||||
s, err := NewService(context.Background(), opts...)
|
||||
require.NoError(t, err)
|
||||
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))
|
||||
s.genesisTime = time.Now().Add(time.Duration(-1*int64(st)*int64(params.BeaconConfig().SecondsPerSlot)) * time.Second)
|
||||
s.store.SetFinalizedCheckptAndPayloadHash(ðpb.Checkpoint{}, [32]byte{})
|
||||
require.NoError(t, s.checkSaveHotStateDB(context.Background()))
|
||||
s.genesisTime = time.Now()
|
||||
|
||||
@@ -222,6 +222,7 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
|
||||
Root: bytesutil.ToBytes32(finalized.Root)}); err != nil {
|
||||
return errors.Wrap(err, "could not update forkchoice's finalized checkpoint")
|
||||
}
|
||||
forkChoicer.SetGenesisTime(uint64(s.genesisTime.Unix()))
|
||||
|
||||
st, err := s.cfg.StateGen.StateByRoot(s.ctx, fRoot)
|
||||
if err != nil {
|
||||
@@ -478,9 +479,12 @@ func (s *Service) saveGenesisData(ctx context.Context, genesisState state.Beacon
|
||||
if err := s.cfg.ForkChoiceStore.InsertNode(ctx, genesisState, genesisBlkRoot); err != nil {
|
||||
log.Fatalf("Could not process genesis block for fork choice: %v", err)
|
||||
}
|
||||
s.cfg.ForkChoiceStore.SetOriginRoot(genesisBlkRoot)
|
||||
// Set genesis as fully validated
|
||||
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, genesisBlkRoot); err != nil {
|
||||
log.Fatalf("Could not set optimistic status of genesis block to false: %v", err)
|
||||
return errors.Wrap(err, "Could not set optimistic status of genesis block to false")
|
||||
}
|
||||
s.cfg.ForkChoiceStore.SetGenesisTime(uint64(s.genesisTime.Unix()))
|
||||
|
||||
s.setHead(genesisBlkRoot, genesisBlk, genesisState)
|
||||
return nil
|
||||
|
||||
@@ -42,7 +42,6 @@ go_test(
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
"//testing/util:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_google_gofuzz//:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -3,7 +3,6 @@ package signing
|
||||
import (
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
)
|
||||
|
||||
@@ -11,17 +10,19 @@ var ErrNilRegistration = errors.New("nil signed registration")
|
||||
|
||||
// VerifyRegistrationSignature verifies the signature of a validator's registration.
|
||||
func VerifyRegistrationSignature(
|
||||
e types.Epoch,
|
||||
f *ethpb.Fork,
|
||||
sr *ethpb.SignedValidatorRegistrationV1,
|
||||
genesisRoot []byte,
|
||||
) error {
|
||||
if sr == nil || sr.Message == nil {
|
||||
return ErrNilRegistration
|
||||
}
|
||||
|
||||
d := params.BeaconConfig().DomainApplicationBuilder
|
||||
sd, err := Domain(f, e, d, genesisRoot)
|
||||
// Per the spec, the fork version and genesis validators root are left nil,
// which defaults them to the genesis fork version and the zero root.
|
||||
sd, err := ComputeDomain(
|
||||
d,
|
||||
nil, /* fork version */
|
||||
nil /* genesis val root */)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -10,8 +10,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/testing/util"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
)
|
||||
|
||||
func TestVerifyRegistrationSignature(t *testing.T) {
|
||||
@@ -23,22 +21,22 @@ func TestVerifyRegistrationSignature(t *testing.T) {
|
||||
Timestamp: uint64(time.Now().Unix()),
|
||||
Pubkey: sk.PublicKey().Marshal(),
|
||||
}
|
||||
st, _ := util.DeterministicGenesisState(t, 1)
|
||||
d := params.BeaconConfig().DomainApplicationBuilder
|
||||
e := slots.ToEpoch(st.Slot())
|
||||
sig, err := signing.ComputeDomainAndSign(st, e, reg, d, sk)
|
||||
domain, err := signing.ComputeDomain(d, nil, nil)
|
||||
require.NoError(t, err)
|
||||
sr, err := signing.ComputeSigningRoot(reg, domain)
|
||||
require.NoError(t, err)
|
||||
sk.Sign(sr[:]).Marshal()
|
||||
|
||||
sReg := ðpb.SignedValidatorRegistrationV1{
|
||||
Message: reg,
|
||||
Signature: sig,
|
||||
Signature: sk.Sign(sr[:]).Marshal(),
|
||||
}
|
||||
f := st.Fork()
|
||||
g := st.GenesisValidatorsRoot()
|
||||
require.NoError(t, signing.VerifyRegistrationSignature(e, f, sReg, g))
|
||||
require.NoError(t, signing.VerifyRegistrationSignature(sReg))
|
||||
|
||||
sReg.Signature = []byte("bad")
|
||||
require.ErrorIs(t, signing.VerifyRegistrationSignature(e, f, sReg, g), signing.ErrSigFailedToVerify)
|
||||
require.ErrorIs(t, signing.VerifyRegistrationSignature(sReg), signing.ErrSigFailedToVerify)
|
||||
|
||||
sReg.Message = nil
|
||||
require.ErrorIs(t, signing.VerifyRegistrationSignature(e, f, sReg, g), signing.ErrNilRegistration)
|
||||
require.ErrorIs(t, signing.VerifyRegistrationSignature(sReg), signing.ErrNilRegistration)
|
||||
}
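As a usage sketch of the new verification flow: the single-argument VerifyRegistrationSignature and the ComputeDomain call with nil fork version and genesis root are taken from the hunks above, while the helper name, its parameters, and the surrounding imports (signing, params, bls, ethpb) are assumptions for illustration only.

// buildAndVerifyRegistration is a hypothetical caller-side helper; reg and sk
// are assumed to be provided by the caller, as in the test above.
func buildAndVerifyRegistration(reg *ethpb.ValidatorRegistrationV1, sk bls.SecretKey) error {
	d := params.BeaconConfig().DomainApplicationBuilder
	// Nil fork version and genesis validators root, per the verification change above.
	domain, err := signing.ComputeDomain(d, nil /* fork version */, nil /* genesis val root */)
	if err != nil {
		return err
	}
	root, err := signing.ComputeSigningRoot(reg, domain)
	if err != nil {
		return err
	}
	signed := &ethpb.SignedValidatorRegistrationV1{Message: reg, Signature: sk.Sign(root[:]).Marshal()}
	return signing.VerifyRegistrationSignature(signed)
}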
@@ -30,6 +30,7 @@ go_library(
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
@@ -61,7 +62,6 @@ go_test(
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//consensus-types/wrapper:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//testing/assert:go_default_library",
|
||||
"//testing/require:go_default_library",
|
||||
|
||||
@@ -59,7 +59,8 @@ func TestFFGUpdates_OneBranch(t *testing.T) {
|
||||
// 2 <- head
|
||||
// |
|
||||
// 3
|
||||
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Root: indexToHash(2), Epoch: 1}
|
||||
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Root: indexToHash(1), Epoch: 1}
|
||||
f.store.finalizedCheckpoint = &forkchoicetypes.Checkpoint{Root: indexToHash(0), Epoch: 0}
|
||||
r, err = f.Head(context.Background(), balances)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(2), r, "Incorrect head with justified epoch at 1")
|
||||
@@ -218,6 +219,7 @@ func setup(justifiedEpoch, finalizedEpoch types.Epoch) *ForkChoice {
|
||||
ctx := context.Background()
|
||||
f := New()
|
||||
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: justifiedEpoch, Root: params.BeaconConfig().ZeroHash}
|
||||
f.store.bestJustifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: justifiedEpoch, Root: params.BeaconConfig().ZeroHash}
|
||||
f.store.finalizedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: finalizedEpoch, Root: params.BeaconConfig().ZeroHash}
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 0, params.BeaconConfig().ZeroHash, [32]byte{}, params.BeaconConfig().ZeroHash, justifiedEpoch, finalizedEpoch)
|
||||
if err != nil {
|
||||
|
||||
@@ -15,6 +15,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
@@ -22,13 +23,14 @@ import (
|
||||
// New initializes a new fork choice store.
|
||||
func New() *ForkChoice {
|
||||
s := &Store{
|
||||
justifiedCheckpoint: &forkchoicetypes.Checkpoint{},
|
||||
finalizedCheckpoint: &forkchoicetypes.Checkpoint{},
|
||||
proposerBoostRoot: [32]byte{},
|
||||
nodeByRoot: make(map[[fieldparams.RootLength]byte]*Node),
|
||||
nodeByPayload: make(map[[fieldparams.RootLength]byte]*Node),
|
||||
slashedIndices: make(map[types.ValidatorIndex]bool),
|
||||
pruneThreshold: defaultPruneThreshold,
|
||||
justifiedCheckpoint: &forkchoicetypes.Checkpoint{},
|
||||
bestJustifiedCheckpoint: &forkchoicetypes.Checkpoint{},
|
||||
finalizedCheckpoint: &forkchoicetypes.Checkpoint{},
|
||||
proposerBoostRoot: [32]byte{},
|
||||
nodeByRoot: make(map[[fieldparams.RootLength]byte]*Node),
|
||||
nodeByPayload: make(map[[fieldparams.RootLength]byte]*Node),
|
||||
slashedIndices: make(map[types.ValidatorIndex]bool),
|
||||
pruneThreshold: defaultPruneThreshold,
|
||||
}
|
||||
|
||||
b := make([]uint64, 0)
|
||||
@@ -139,7 +141,55 @@ func (f *ForkChoice) InsertNode(ctx context.Context, state state.ReadOnlyBeaconS
|
||||
return errInvalidNilCheckpoint
|
||||
}
|
||||
finalizedEpoch := fc.Epoch
|
||||
return f.store.insert(ctx, slot, root, parentRoot, payloadHash, justifiedEpoch, finalizedEpoch)
|
||||
err := f.store.insert(ctx, slot, root, parentRoot, payloadHash, justifiedEpoch, finalizedEpoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return f.updateCheckpoints(ctx, jc, fc)
|
||||
}
|
||||
|
||||
// updateCheckpoints updates the checkpoints when inserting a new node.
|
||||
func (f *ForkChoice) updateCheckpoints(ctx context.Context, jc, fc *ethpb.Checkpoint) error {
|
||||
f.store.checkpointsLock.Lock()
|
||||
defer f.store.checkpointsLock.Unlock()
|
||||
if jc.Epoch > f.store.justifiedCheckpoint.Epoch {
|
||||
if jc.Epoch > f.store.bestJustifiedCheckpoint.Epoch {
|
||||
f.store.bestJustifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch,
|
||||
Root: bytesutil.ToBytes32(jc.Root)}
|
||||
}
|
||||
currentSlot := slots.CurrentSlot(f.store.genesisTime)
|
||||
if slots.SinceEpochStarts(currentSlot) < params.BeaconConfig().SafeSlotsToUpdateJustified {
|
||||
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch,
|
||||
Root: bytesutil.ToBytes32(jc.Root)}
|
||||
} else {
|
||||
currentJcp := f.store.justifiedCheckpoint
|
||||
currentRoot := currentJcp.Root
|
||||
if currentRoot == params.BeaconConfig().ZeroHash {
|
||||
currentRoot = f.store.originRoot
|
||||
}
|
||||
jSlot, err := slots.EpochStart(currentJcp.Epoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
jcRoot := bytesutil.ToBytes32(jc.Root)
|
||||
root, err := f.AncestorRoot(ctx, jcRoot, jSlot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if root == currentRoot {
|
||||
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch,
|
||||
Root: jcRoot}
|
||||
}
|
||||
}
|
||||
}
|
||||
// Update finalization
|
||||
if fc.Epoch > f.store.finalizedCheckpoint.Epoch {
|
||||
f.store.finalizedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: fc.Epoch,
|
||||
Root: bytesutil.ToBytes32(fc.Root)}
|
||||
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch,
|
||||
Root: bytesutil.ToBytes32(jc.Root)}
|
||||
}
|
||||
return nil
|
||||
}
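The justification rule above can be read in isolation: a newer justified checkpoint is adopted immediately while the current slot is early in its epoch, and only if it descends from the currently justified root once SafeSlotsToUpdateJustified has passed, while bestJustifiedCheckpoint is raised unconditionally. A minimal, self-contained sketch of that rule (simplified types; the descendant check is answered in the real code by AncestorRoot):

package main

import "fmt"

// shouldAdoptJustified mirrors the decision in updateCheckpoints: adopt the
// incoming justified checkpoint right away when the slot is early in its epoch
// (before SafeSlotsToUpdateJustified); later in the epoch, adopt it only if it
// descends from the currently justified root.
func shouldAdoptJustified(slotsIntoEpoch, safeSlots uint64, descendsFromJustified bool) bool {
	if slotsIntoEpoch < safeSlots {
		return true
	}
	return descendsFromJustified
}

func main() {
	fmt.Println(shouldAdoptJustified(2, 8, false))  // early in the epoch: adopt
	fmt.Println(shouldAdoptJustified(10, 8, false)) // late and not a descendant: keep the current checkpoint
	fmt.Println(shouldAdoptJustified(10, 8, true))  // late but a descendant: adopt
}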
// Prune prunes the fork choice store with the new finalized root. The store is only pruned if the input
|
||||
@@ -208,7 +258,7 @@ func (f *ForkChoice) IsOptimistic(root [32]byte) (bool, error) {
|
||||
}
|
||||
|
||||
// AncestorRoot returns the ancestor root of the input block root at a given slot.
|
||||
func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot types.Slot) ([]byte, error) {
|
||||
func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot types.Slot) ([32]byte, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "protoArray.AncestorRoot")
|
||||
defer span.End()
|
||||
|
||||
@@ -217,22 +267,22 @@ func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot types
|
||||
|
||||
node, ok := f.store.nodeByRoot[root]
|
||||
if !ok || node == nil {
|
||||
return nil, errors.Wrap(ErrNilNode, "could not determine ancestor root")
|
||||
return [32]byte{}, errors.Wrap(ErrNilNode, "could not determine ancestor root")
|
||||
}
|
||||
|
||||
n := node
|
||||
for n != nil && n.slot > slot {
|
||||
if ctx.Err() != nil {
|
||||
return nil, ctx.Err()
|
||||
return [32]byte{}, ctx.Err()
|
||||
}
|
||||
n = n.parent
|
||||
}
|
||||
|
||||
if n == nil {
|
||||
return nil, errors.Wrap(ErrNilNode, "could not determine ancestor root")
|
||||
return [32]byte{}, errors.Wrap(ErrNilNode, "could not determine ancestor root")
|
||||
}
|
||||
|
||||
return n.root[:], nil
|
||||
return n.root, nil
|
||||
}
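Because AncestorRoot now returns a fixed-size [32]byte instead of a []byte slice, callers compare roots with == directly and drop the bytesutil.ToBytes32 conversions removed from the tests further below. A small illustrative sketch (the function type and the uint64 slot are stand-ins, not the real interface):

package main

import (
	"context"
	"fmt"
)

// ancestorFn stands in for the updated AncestorRoot signature, which now
// returns a [32]byte value rather than a []byte slice.
type ancestorFn func(ctx context.Context, root [32]byte, slot uint64) ([32]byte, error)

// descendsFrom is a hypothetical caller: with array values the comparison is a
// plain ==, with no conversion and no nil-slice case to handle.
func descendsFrom(ctx context.Context, ancestor ancestorFn, child, target [32]byte, targetSlot uint64) (bool, error) {
	r, err := ancestor(ctx, child, targetSlot)
	if err != nil {
		return false, err
	}
	return r == target, nil
}

func main() {
	identity := func(_ context.Context, root [32]byte, _ uint64) ([32]byte, error) { return root, nil }
	ok, err := descendsFrom(context.Background(), identity, [32]byte{'a'}, [32]byte{'a'}, 0)
	fmt.Println(ok, err) // true <nil>
}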
// updateBalances updates the balances that directly voted for each block taking into account the
|
||||
@@ -397,6 +447,10 @@ func (f *ForkChoice) UpdateJustifiedCheckpoint(jc *forkchoicetypes.Checkpoint) e
|
||||
f.store.checkpointsLock.Lock()
|
||||
defer f.store.checkpointsLock.Unlock()
|
||||
f.store.justifiedCheckpoint = jc
|
||||
bj := f.store.bestJustifiedCheckpoint
|
||||
if bj == nil || jc.Epoch > bj.Epoch {
|
||||
f.store.bestJustifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch, Root: jc.Root}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -484,3 +538,13 @@ func (f *ForkChoice) InsertOptimisticChain(ctx context.Context, chain []*forkcho
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetGenesisTime sets the genesisTime tracked by forkchoice
|
||||
func (f *ForkChoice) SetGenesisTime(genesisTime uint64) {
|
||||
f.store.genesisTime = genesisTime
|
||||
}
|
||||
|
||||
// SetOriginRoot sets the genesis block root
|
||||
func (f *ForkChoice) SetOriginRoot(root [32]byte) {
|
||||
f.store.originRoot = root
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
|
||||
@@ -13,7 +14,6 @@ import (
|
||||
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
@@ -244,7 +244,7 @@ func TestForkChoice_AncestorRoot(t *testing.T) {
|
||||
|
||||
r, err := f.AncestorRoot(ctx, indexToHash(3), 6)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, bytesutil.ToBytes32(r), indexToHash(3))
|
||||
assert.Equal(t, r, indexToHash(3))
|
||||
|
||||
_, err = f.AncestorRoot(ctx, indexToHash(3), 0)
|
||||
assert.ErrorContains(t, ErrNilNode.Error(), err)
|
||||
@@ -252,11 +252,11 @@ func TestForkChoice_AncestorRoot(t *testing.T) {
|
||||
root, err := f.AncestorRoot(ctx, indexToHash(3), 5)
|
||||
require.NoError(t, err)
|
||||
hash3 := indexToHash(3)
|
||||
require.DeepEqual(t, hash3[:], root)
|
||||
require.DeepEqual(t, hash3, root)
|
||||
root, err = f.AncestorRoot(ctx, indexToHash(3), 1)
|
||||
require.NoError(t, err)
|
||||
hash1 := indexToHash(1)
|
||||
require.DeepEqual(t, hash1[:], root)
|
||||
require.DeepEqual(t, hash1, root)
|
||||
}
|
||||
|
||||
func TestForkChoice_AncestorEqualSlot(t *testing.T) {
|
||||
@@ -271,8 +271,7 @@ func TestForkChoice_AncestorEqualSlot(t *testing.T) {
|
||||
|
||||
r, err := f.AncestorRoot(ctx, [32]byte{'3'}, 100)
|
||||
require.NoError(t, err)
|
||||
root := bytesutil.ToBytes32(r)
|
||||
require.Equal(t, root, [32]byte{'1'})
|
||||
require.Equal(t, r, [32]byte{'1'})
|
||||
}
|
||||
|
||||
func TestForkChoice_AncestorLowerSlot(t *testing.T) {
|
||||
@@ -287,8 +286,7 @@ func TestForkChoice_AncestorLowerSlot(t *testing.T) {
|
||||
|
||||
r, err := f.AncestorRoot(ctx, [32]byte{'3'}, 150)
|
||||
require.NoError(t, err)
|
||||
root := bytesutil.ToBytes32(r)
|
||||
require.Equal(t, root, [32]byte{'1'})
|
||||
require.Equal(t, r, [32]byte{'1'})
|
||||
}
|
||||
|
||||
func TestForkChoice_RemoveEquivocating(t *testing.T) {
|
||||
@@ -350,7 +348,7 @@ func indexToHash(i uint64) [32]byte {
|
||||
return hash.Hash(b[:])
|
||||
}
|
||||
|
||||
func TestStore_UpdateCheckpoints(t *testing.T) {
|
||||
func TestForkChoice_UpdateJustifiedAndFinalizedCheckpoints(t *testing.T) {
|
||||
f := setup(1, 1)
|
||||
jr := [32]byte{'j'}
|
||||
fr := [32]byte{'f'}
|
||||
@@ -593,3 +591,145 @@ func TestStore_InsertOptimisticChain(t *testing.T) {
|
||||
f = setup(1, 1)
|
||||
require.NoError(t, f.InsertOptimisticChain(context.Background(), args[2:]))
|
||||
}
|
||||
|
||||
func TestForkChoice_UpdateCheckpoints(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
tests := []struct {
|
||||
name string
|
||||
justified *forkchoicetypes.Checkpoint
|
||||
bestJustified *forkchoicetypes.Checkpoint
|
||||
finalized *forkchoicetypes.Checkpoint
|
||||
newJustified *forkchoicetypes.Checkpoint
|
||||
newFinalized *forkchoicetypes.Checkpoint
|
||||
wantedJustified *forkchoicetypes.Checkpoint
|
||||
wantedBestJustified *forkchoicetypes.Checkpoint
|
||||
wantedFinalized *forkchoicetypes.Checkpoint
|
||||
currentSlot types.Slot
|
||||
wantedErr string
|
||||
}{
|
||||
{
|
||||
name: "lower than store justified and finalized",
|
||||
justified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
|
||||
finalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'f'}},
|
||||
bestJustified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
|
||||
newJustified: &forkchoicetypes.Checkpoint{Epoch: 1},
|
||||
newFinalized: &forkchoicetypes.Checkpoint{Epoch: 0},
|
||||
wantedJustified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
|
||||
wantedBestJustified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
|
||||
wantedFinalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'f'}},
|
||||
},
|
||||
{
|
||||
name: "higher than store justified, early slot, direct descendant",
|
||||
justified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
|
||||
bestJustified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
|
||||
finalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'f'}},
|
||||
newJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'b'}},
|
||||
newFinalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'g'}},
|
||||
wantedJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'b'}},
|
||||
wantedBestJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'b'}},
|
||||
wantedFinalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'f'}},
|
||||
},
|
||||
{
|
||||
name: "higher than store justified, early slot, not a descendant",
|
||||
justified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
|
||||
bestJustified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
|
||||
finalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'f'}},
|
||||
newJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'c'}},
|
||||
newFinalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'g'}},
|
||||
wantedJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'c'}},
|
||||
wantedBestJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'c'}},
|
||||
wantedFinalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'f'}},
|
||||
},
|
||||
{
|
||||
name: "higher than store justified, late slot, descendant",
|
||||
justified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
|
||||
bestJustified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
|
||||
finalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'f'}},
|
||||
newJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'b'}},
|
||||
newFinalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'g'}},
|
||||
wantedJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'b'}},
|
||||
wantedFinalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'f'}},
|
||||
wantedBestJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'b'}},
|
||||
currentSlot: params.BeaconConfig().SafeSlotsToUpdateJustified.Add(1),
|
||||
},
|
||||
{
|
||||
name: "higher than store justified, late slot, not descendant",
|
||||
justified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
|
||||
bestJustified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
|
||||
finalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'f'}},
|
||||
newJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'c'}},
|
||||
newFinalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'g'}},
|
||||
wantedJustified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
|
||||
wantedFinalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'f'}},
|
||||
wantedBestJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'c'}},
|
||||
currentSlot: params.BeaconConfig().SafeSlotsToUpdateJustified.Add(1),
|
||||
},
|
||||
{
|
||||
name: "higher than store finalized, late slot, not descendant",
|
||||
justified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
|
||||
bestJustified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
|
||||
finalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'f'}},
|
||||
newJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'c'}},
|
||||
newFinalized: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'h'}},
|
||||
wantedJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'c'}},
|
||||
wantedFinalized: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'h'}},
|
||||
wantedBestJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'c'}},
|
||||
currentSlot: params.BeaconConfig().SafeSlotsToUpdateJustified.Add(1),
|
||||
},
|
||||
{
|
||||
name: "Unknown checkpoint root, late slot",
|
||||
justified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
|
||||
bestJustified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
|
||||
finalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'f'}},
|
||||
newJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'d'}},
|
||||
newFinalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'h'}},
|
||||
currentSlot: params.BeaconConfig().SafeSlotsToUpdateJustified.Add(1),
|
||||
wantedErr: "could not determine ancestor root",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
fcs := setup(tt.justified.Epoch, tt.finalized.Epoch)
|
||||
fcs.store.justifiedCheckpoint = tt.justified
|
||||
fcs.store.finalizedCheckpoint = tt.finalized
|
||||
fcs.store.bestJustifiedCheckpoint = tt.bestJustified
|
||||
fcs.store.genesisTime = uint64(time.Now().Unix()) - uint64(tt.currentSlot)*params.BeaconConfig().SecondsPerSlot
|
||||
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 32, [32]byte{'f'},
|
||||
[32]byte{}, [32]byte{}, tt.finalized.Epoch, tt.finalized.Epoch)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 64, [32]byte{'j'},
|
||||
[32]byte{'f'}, [32]byte{}, tt.justified.Epoch, tt.finalized.Epoch)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 96, [32]byte{'b'},
|
||||
[32]byte{'j'}, [32]byte{}, tt.newJustified.Epoch, tt.newFinalized.Epoch)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 96, [32]byte{'c'},
|
||||
[32]byte{'f'}, [32]byte{}, tt.newJustified.Epoch, tt.newFinalized.Epoch)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
// Reset the justification checkpoints since the insertions above changed them.
|
||||
fcs.store.justifiedCheckpoint = tt.justified
|
||||
fcs.store.finalizedCheckpoint = tt.finalized
|
||||
fcs.store.bestJustifiedCheckpoint = tt.bestJustified
|
||||
|
||||
jc := ðpb.Checkpoint{Epoch: tt.newJustified.Epoch, Root: tt.newJustified.Root[:]}
|
||||
fc := ðpb.Checkpoint{Epoch: tt.newFinalized.Epoch, Root: tt.newFinalized.Root[:]}
|
||||
err = fcs.updateCheckpoints(ctx, jc, fc)
|
||||
if len(tt.wantedErr) > 0 {
|
||||
require.ErrorContains(t, tt.wantedErr, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tt.wantedJustified.Epoch, fcs.store.justifiedCheckpoint.Epoch)
|
||||
require.Equal(t, tt.wantedFinalized.Epoch, fcs.store.finalizedCheckpoint.Epoch)
|
||||
require.Equal(t, tt.wantedJustified.Root, fcs.store.justifiedCheckpoint.Root)
|
||||
require.Equal(t, tt.wantedFinalized.Root, fcs.store.finalizedCheckpoint.Root)
|
||||
require.Equal(t, tt.wantedBestJustified.Epoch, fcs.store.bestJustifiedCheckpoint.Epoch)
|
||||
require.Equal(t, tt.wantedBestJustified.Root, fcs.store.bestJustifiedCheckpoint.Root)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -69,7 +69,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(4), r, "Incorrect head for with justified epoch at 1")
|
||||
|
||||
// Insert block 5 with justified epoch of 2, verify head is still at 4.
|
||||
// Insert block 5 with justified epoch of 2, verify head is 5
|
||||
// 0
|
||||
// / \
|
||||
// 2 1
|
||||
@@ -82,32 +82,6 @@ func TestNoVote_CanFindHead(t *testing.T) {
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
r, err = f.Head(context.Background(), balances)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(4), r, "Incorrect head for with justified epoch at 1")
|
||||
|
||||
// Verify there's an error when starting from a block with wrong justified epoch.
|
||||
// 0
|
||||
// / \
|
||||
// 2 1
|
||||
// | |
|
||||
// head -> 4 3
|
||||
// |
|
||||
// 5 <- starting from 5 with justified epoch 0 should error
|
||||
f.store.justifiedCheckpoint.Root = indexToHash(5)
|
||||
_, err = f.Head(context.Background(), balances)
|
||||
wanted := "head at slot 0 with weight 0 is not eligible, finalizedEpoch 1 != 1, justifiedEpoch 2 != 1"
|
||||
require.ErrorContains(t, wanted, err)
|
||||
|
||||
// Set the justified epoch to 2 and start block to 5 to verify head is 5.
|
||||
// 0
|
||||
// / \
|
||||
// 2 1
|
||||
// | |
|
||||
// 4 3
|
||||
// |
|
||||
// 5 <- head
|
||||
f.store.justifiedCheckpoint.Epoch = 2
|
||||
r, err = f.Head(context.Background(), balances)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(5), r, "Incorrect head for with justified epoch at 2")
|
||||
|
||||
// Insert block 6 with justified epoch of 2, verify head is at 6.
|
||||
|
||||
@@ -63,11 +63,20 @@ func (s *Store) PruneThreshold() uint64 {
|
||||
func (s *Store) head(ctx context.Context) ([32]byte, error) {
|
||||
_, span := trace.StartSpan(ctx, "doublyLinkedForkchoice.head")
|
||||
defer span.End()
|
||||
s.checkpointsLock.RLock()
|
||||
defer s.checkpointsLock.RUnlock()
|
||||
|
||||
// JustifiedRoot has to be known
|
||||
justifiedNode, ok := s.nodeByRoot[s.justifiedCheckpoint.Root]
|
||||
if !ok || justifiedNode == nil {
|
||||
return [32]byte{}, errUnknownJustifiedRoot
|
||||
// If the justifiedCheckpoint is from genesis, then the root is
|
||||
// zeroHash. In this case it should be the root of the forkchoice
// tree.
|
||||
if s.justifiedCheckpoint.Epoch == params.BeaconConfig().GenesisEpoch {
|
||||
justifiedNode = s.treeRootNode
|
||||
} else {
|
||||
return [32]byte{}, errUnknownJustifiedRoot
|
||||
}
|
||||
}
|
||||
|
||||
// If the justified node doesn't have a best descendant,
|
||||
@@ -78,8 +87,8 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
|
||||
}
|
||||
|
||||
if !bestDescendant.viableForHead(s.justifiedCheckpoint.Epoch, s.finalizedCheckpoint.Epoch) {
|
||||
return [32]byte{}, fmt.Errorf("head at slot %d with weight %d is not eligible, finalizedEpoch %d != %d, justifiedEpoch %d != %d",
|
||||
bestDescendant.slot, bestDescendant.weight/10e9, bestDescendant.finalizedEpoch, s.finalizedCheckpoint.Epoch, bestDescendant.justifiedEpoch, s.justifiedCheckpoint.Epoch)
|
||||
return [32]byte{}, fmt.Errorf("head at slot %d with weight %d is not eligible, finalizedEpoch, justified Epoch %d, %d != %d, %d",
|
||||
bestDescendant.slot, bestDescendant.weight/10e9, bestDescendant.finalizedEpoch, bestDescendant.justifiedEpoch, s.finalizedCheckpoint.Epoch, s.justifiedCheckpoint.Epoch)
|
||||
}
|
||||
|
||||
// Update metrics.
|
||||
|
||||
@@ -79,7 +79,7 @@ func TestForkChoice_HasNode(t *testing.T) {
|
||||
func TestStore_Head_UnknownJustifiedRoot(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
|
||||
f.store.justifiedCheckpoint.Root = [32]byte{'a'}
|
||||
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'a'}}
|
||||
_, err := f.store.head(context.Background())
|
||||
assert.ErrorContains(t, errUnknownJustifiedRoot.Error(), err)
|
||||
}
|
||||
|
||||
@@ -19,6 +19,7 @@ type ForkChoice struct {
|
||||
// Store defines the fork choice store which includes block nodes and the last view of checkpoint information.
|
||||
type Store struct {
|
||||
justifiedCheckpoint *forkchoicetypes.Checkpoint // latest justified epoch in store.
|
||||
bestJustifiedCheckpoint *forkchoicetypes.Checkpoint // best justified checkpoint in store.
|
||||
finalizedCheckpoint *forkchoicetypes.Checkpoint // latest finalized epoch in store.
|
||||
pruneThreshold uint64 // do not prune tree unless threshold is reached.
|
||||
proposerBoostRoot [fieldparams.RootLength]byte // latest block root that was boosted after being received in a timely manner.
|
||||
@@ -29,9 +30,11 @@ type Store struct {
|
||||
nodeByRoot map[[fieldparams.RootLength]byte]*Node // nodes indexed by roots.
|
||||
nodeByPayload map[[fieldparams.RootLength]byte]*Node // nodes indexed by payload Hash
|
||||
slashedIndices map[types.ValidatorIndex]bool // the list of equivocating validator indices
|
||||
originRoot [fieldparams.RootLength]byte // The genesis block root
|
||||
nodesLock sync.RWMutex
|
||||
proposerBoostLock sync.RWMutex
|
||||
checkpointsLock sync.RWMutex
|
||||
genesisTime uint64
|
||||
}
|
||||
|
||||
// Node defines the individual block which includes its block parent, ancestor and how much weight accounted for it.
|
||||
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
@@ -114,7 +113,7 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(4), r, "Incorrect head for with justified epoch at 1")
|
||||
|
||||
// Insert block 5 with justified epoch 2, it should be filtered out:
|
||||
// Insert block 5 with justified epoch 2, it becomes head
|
||||
// 0
|
||||
// / \
|
||||
// 2 1
|
||||
@@ -130,9 +129,9 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
|
||||
r, err = f.Head(context.Background(), balances)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(4), r, "Incorrect head for with justified epoch at 1")
|
||||
assert.Equal(t, indexToHash(5), r, "Incorrect head for with justified epoch at 1")
|
||||
|
||||
// Insert block 6 with justified epoch 0:
|
||||
// Insert block 6 with justified epoch 3: verify it's head
|
||||
// 0
|
||||
// / \
|
||||
// 2 1
|
||||
@@ -141,28 +140,18 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
// |
|
||||
// 4 <- head
|
||||
// / \
|
||||
// 5 6 <- justified epoch = 0
|
||||
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(6), indexToHash(4), params.BeaconConfig().ZeroHash, 1, 1)
|
||||
// 5 6 <- justified epoch = 3
|
||||
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(6), indexToHash(4), params.BeaconConfig().ZeroHash, 3, 2)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
r, err = f.Head(context.Background(), balances)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(6), r, "Incorrect head for with justified epoch at 1")
|
||||
|
||||
// Moved 2 votes to block 5:
|
||||
// 0
|
||||
// / \
|
||||
// 2 1
|
||||
// |
|
||||
// 3
|
||||
// |
|
||||
// 4
|
||||
// / \
|
||||
// 2 votes-> 5 6
|
||||
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(6), indexToHash(4), params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
|
||||
f.ProcessAttestation(context.Background(), []uint64{0, 1}, indexToHash(5), 4)
|
||||
|
||||
// Insert blocks 7, 8 and 9:
// Insert blocks 7 and 8
|
||||
// 6 should still be the head, even though 5 has all the votes.
|
||||
// 0
|
||||
// / \
|
||||
@@ -177,23 +166,17 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
// 7
|
||||
// |
|
||||
// 8
|
||||
// |
|
||||
// 9
|
||||
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(7), indexToHash(5), params.BeaconConfig().ZeroHash, 2, 2)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(8), indexToHash(7), params.BeaconConfig().ZeroHash, 2, 2)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(9), indexToHash(8), params.BeaconConfig().ZeroHash, 2, 2)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
|
||||
r, err = f.Head(context.Background(), balances)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(6), r, "Incorrect head for with justified epoch at 1")
|
||||
|
||||
// Update fork choice justified epoch to 2 and start block to 5.
|
||||
// Insert block 9 with justified epoch 3, it becomes head
|
||||
// Verify 9 is the head:
|
||||
// 0
|
||||
// / \
|
||||
@@ -209,15 +192,15 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
// |
|
||||
// 8
|
||||
// |
|
||||
// 9 <- head
|
||||
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Root: indexToHash(5), Epoch: 2}
|
||||
f.store.finalizedCheckpoint = &forkchoicetypes.Checkpoint{Root: indexToHash(5), Epoch: 2}
|
||||
// 10 <- head
|
||||
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(10), indexToHash(8), params.BeaconConfig().ZeroHash, 3, 2)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
r, err = f.Head(context.Background(), balances)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 2")
|
||||
assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 3")
|
||||
|
||||
// Insert block 10 and 2 validators updated their vote to 9.
|
||||
// Verify 9 is the head:
|
||||
// Insert block 9 forking 10, verify it's the head (lexicographic order)
|
||||
// 0
|
||||
// / \
|
||||
// 2 1
|
||||
@@ -232,45 +215,51 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
// |
|
||||
// 8
|
||||
// / \
|
||||
// 2 votes->9 10
|
||||
f.ProcessAttestation(context.Background(), []uint64{0, 1}, indexToHash(9), 5)
|
||||
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(10), indexToHash(8), params.BeaconConfig().ZeroHash, 2, 2)
|
||||
// 9 10
|
||||
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(9), indexToHash(8), params.BeaconConfig().ZeroHash, 3, 2)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
|
||||
r, err = f.Head(context.Background(), balances)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 2")
|
||||
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 3")
|
||||
|
||||
// Move two votes to 10, verify it's the head
|
||||
|
||||
f.ProcessAttestation(context.Background(), []uint64{0, 1}, indexToHash(10), 5)
|
||||
r, err = f.Head(context.Background(), balances)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 3")
|
||||
|
||||
// Add 3 more validators to the system.
|
||||
balances = []uint64{1, 1, 1, 1, 1}
|
||||
// The new validators voted for 10.
|
||||
f.ProcessAttestation(context.Background(), []uint64{2, 3, 4}, indexToHash(10), 5)
|
||||
// The new head should be 10.
|
||||
// The new validators voted for 9
|
||||
f.ProcessAttestation(context.Background(), []uint64{2, 3, 4}, indexToHash(9), 5)
|
||||
// The new head should be 9.
|
||||
r, err = f.Head(context.Background(), balances)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 2")
|
||||
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 3")
|
||||
|
||||
// Set the balances of the last 2 validators to 0.
|
||||
balances = []uint64{1, 1, 1, 0, 0}
|
||||
// The head should be back to 9.
|
||||
r, err = f.Head(context.Background(), balances)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 1")
|
||||
|
||||
// Set the balances back to normal.
|
||||
balances = []uint64{1, 1, 1, 1, 1}
|
||||
// The head should be back to 10.
|
||||
r, err = f.Head(context.Background(), balances)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 2")
|
||||
assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 3")
|
||||
|
||||
// Remove the last 2 validators.
|
||||
balances = []uint64{1, 1, 1}
|
||||
// Set the balances back to normal.
|
||||
balances = []uint64{1, 1, 1, 1, 1}
|
||||
// The head should be back to 9.
|
||||
r, err = f.Head(context.Background(), balances)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 1")
|
||||
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 3")
|
||||
|
||||
// Remove the last 2 validators.
|
||||
balances = []uint64{1, 1, 1}
|
||||
// The head should be back to 10.
|
||||
r, err = f.Head(context.Background(), balances)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 3")
|
||||
|
||||
// Verify pruning below the prune threshold does not affect head.
|
||||
f.store.pruneThreshold = 1000
|
||||
@@ -279,7 +268,7 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
|
||||
r, err = f.Head(context.Background(), balances)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 2")
|
||||
assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 3")
|
||||
|
||||
// Verify pruning above the prune threshold does prune:
|
||||
// 0
|
||||
@@ -300,10 +289,12 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
f.store.pruneThreshold = 1
|
||||
require.NoError(t, f.store.prune(context.Background(), indexToHash(5)))
|
||||
assert.Equal(t, 5, len(f.store.nodeByRoot), "Incorrect nodes length after prune")
|
||||
// The justified root was pruned above, so artificially reset it to block 5.
|
||||
f.store.justifiedCheckpoint.Root = indexToHash(5)
|
||||
|
||||
r, err = f.Head(context.Background(), balances)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 2")
|
||||
assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 2")
|
||||
|
||||
// Insert new block 11 and verify head is at 11.
|
||||
// 5 6
|
||||
@@ -312,14 +303,14 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
// |
|
||||
// 8
|
||||
// / \
|
||||
// 9 10
|
||||
// 10 9
|
||||
// |
|
||||
// head-> 11
|
||||
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(11), indexToHash(9), params.BeaconConfig().ZeroHash, 2, 2)
|
||||
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(11), indexToHash(10), params.BeaconConfig().ZeroHash, 3, 2)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
|
||||
r, err = f.Head(context.Background(), balances)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(11), r, "Incorrect head for with justified epoch at 2")
|
||||
assert.Equal(t, indexToHash(11), r, "Incorrect head for with justified epoch at 3")
|
||||
}
|
||||
|
||||
@@ -56,7 +56,7 @@ type Getter interface {
|
||||
HasNode([32]byte) bool
|
||||
ProposerBoost() [fieldparams.RootLength]byte
|
||||
HasParent(root [32]byte) bool
|
||||
AncestorRoot(ctx context.Context, root [32]byte, slot types.Slot) ([]byte, error)
|
||||
AncestorRoot(ctx context.Context, root [32]byte, slot types.Slot) ([32]byte, error)
|
||||
CommonAncestorRoot(ctx context.Context, root1 [32]byte, root2 [32]byte) ([32]byte, error)
|
||||
IsCanonical(root [32]byte) bool
|
||||
FinalizedCheckpoint() *forkchoicetypes.Checkpoint
|
||||
@@ -71,4 +71,6 @@ type Setter interface {
|
||||
SetOptimisticToInvalid(context.Context, [fieldparams.RootLength]byte, [fieldparams.RootLength]byte, [fieldparams.RootLength]byte) ([][32]byte, error)
|
||||
UpdateJustifiedCheckpoint(*forkchoicetypes.Checkpoint) error
|
||||
UpdateFinalizedCheckpoint(*forkchoicetypes.Checkpoint) error
|
||||
SetGenesisTime(uint64)
|
||||
SetOriginRoot([32]byte)
|
||||
}
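The Setter interface gains SetGenesisTime and SetOriginRoot so the blockchain service can seed fork choice at startup, as saveGenesisData and StartFromSavedState do above. A minimal sketch with an assumed mock implementation (not the real ForkChoice types):

package main

import "fmt"

// setter mirrors only the two methods added to the forkchoice Setter interface
// in this change; the real interface carries several more methods.
type setter interface {
	SetGenesisTime(uint64)
	SetOriginRoot([32]byte)
}

// mockForkChoice is a stand-in implementation used purely for illustration.
type mockForkChoice struct {
	genesisTime uint64
	originRoot  [32]byte
}

func (m *mockForkChoice) SetGenesisTime(t uint64)  { m.genesisTime = t }
func (m *mockForkChoice) SetOriginRoot(r [32]byte) { m.originRoot = r }

// seedForkChoice records the origin block root and the genesis time so that
// slot arithmetic (SafeSlotsToUpdateJustified) and the genesis-justified
// fallback in head() have the data they need.
func seedForkChoice(s setter, originRoot [32]byte, genesisUnix uint64) {
	s.SetOriginRoot(originRoot)
	s.SetGenesisTime(genesisUnix)
}

func main() {
	m := &mockForkChoice{}
	seedForkChoice(m, [32]byte{'g'}, 1606824023)
	fmt.Printf("genesis=%d origin=%x\n", m.genesisTime, m.originRoot[:1])
}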
@@ -31,6 +31,7 @@ go_library(
|
||||
"//math:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
|
||||
@@ -55,7 +55,6 @@ func prepareForkchoiceState(
|
||||
st, err := v3.InitializeFromProto(base)
|
||||
return st, blockRoot, err
|
||||
}
|
||||
|
||||
func TestFFGUpdates_OneBranch(t *testing.T) {
|
||||
balances := []uint64{1, 1}
|
||||
f := setup(0, 0)
|
||||
@@ -104,8 +103,8 @@ func TestFFGUpdates_OneBranch(t *testing.T) {
|
||||
// 2 <- head
|
||||
// |
|
||||
// 3
|
||||
jc := &forkchoicetypes.Checkpoint{Epoch: 1, Root: indexToHash(2)}
|
||||
f.store.justifiedCheckpoint = jc
|
||||
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Root: indexToHash(1), Epoch: 1}
|
||||
f.store.finalizedCheckpoint = &forkchoicetypes.Checkpoint{Root: indexToHash(0), Epoch: 0}
|
||||
r, err = f.Head(context.Background(), balances)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(2), r, "Incorrect head with justified epoch at 1")
|
||||
@@ -118,8 +117,7 @@ func TestFFGUpdates_OneBranch(t *testing.T) {
|
||||
// 2 <- start
|
||||
// |
|
||||
// 3 <- head
|
||||
jc = &forkchoicetypes.Checkpoint{Epoch: 2, Root: indexToHash(3)}
|
||||
f.store.justifiedCheckpoint = jc
|
||||
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Root: indexToHash(3), Epoch: 2}
|
||||
r, err = f.Head(context.Background(), balances)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(3), r, "Incorrect head with justified epoch at 2")
|
||||
@@ -255,8 +253,7 @@ func TestFFGUpdates_TwoBranches(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(10), r, "Incorrect head with justified epoch at 0")
|
||||
|
||||
jc := &forkchoicetypes.Checkpoint{Epoch: 1, Root: indexToHash(1)}
|
||||
f.store.justifiedCheckpoint = jc
|
||||
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 1, Root: indexToHash(1)}
|
||||
r, err = f.Head(context.Background(), balances)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(7), r, "Incorrect head with justified epoch at 0")
|
||||
@@ -266,6 +263,7 @@ func setup(justifiedEpoch, finalizedEpoch types.Epoch) *ForkChoice {
|
||||
f := New()
|
||||
f.store.nodesIndices[params.BeaconConfig().ZeroHash] = 0
|
||||
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: justifiedEpoch, Root: params.BeaconConfig().ZeroHash}
|
||||
f.store.bestJustifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: justifiedEpoch, Root: params.BeaconConfig().ZeroHash}
|
||||
f.store.finalizedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: finalizedEpoch, Root: params.BeaconConfig().ZeroHash}
|
||||
f.store.nodes = append(f.store.nodes, &Node{
|
||||
slot: 0,
|
||||
|
||||
@@ -69,7 +69,7 @@ func TestNoVote_CanFindHead(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(4), r, "Incorrect head for with justified epoch at 1")
|
||||
|
||||
// Insert block 5 with justified epoch of 2, verify head is still at 4.
|
||||
// Insert block 5 with justified epoch of 2, verify head is 5
|
||||
// 0
|
||||
// / \
|
||||
// 2 1
|
||||
@@ -82,32 +82,6 @@ func TestNoVote_CanFindHead(t *testing.T) {
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
r, err = f.Head(context.Background(), balances)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(4), r, "Incorrect head for with justified epoch at 1")
|
||||
|
||||
// Verify there's an error when starting from a block with wrong justified epoch.
|
||||
// 0
|
||||
// / \
|
||||
// 2 1
|
||||
// | |
|
||||
// head -> 4 3
|
||||
// |
|
||||
// 5 <- starting from 5 with justified epoch 0 should error
|
||||
f.store.justifiedCheckpoint.Root = indexToHash(5)
|
||||
_, err = f.Head(context.Background(), balances)
|
||||
wanted := "head at slot 0 with weight 0 is not eligible, finalizedEpoch 1 != 1, justifiedEpoch 2 != 1"
|
||||
require.ErrorContains(t, wanted, err)
|
||||
|
||||
// Set the justified epoch to 2 and start block to 5 to verify head is 5.
|
||||
// 0
|
||||
// / \
|
||||
// 2 1
|
||||
// | |
|
||||
// 4 3
|
||||
// |
|
||||
// 5 <- head
|
||||
f.store.justifiedCheckpoint.Epoch = 2
|
||||
r, err = f.Head(context.Background(), balances)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(5), r, "Incorrect head for with justified epoch at 2")
|
||||
|
||||
// Insert block 6 with justified epoch of 2, verify head is at 6.
|
||||
|
||||
@@ -15,8 +15,9 @@ import (
|
||||
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
pmath "github.com/prysmaticlabs/prysm/math"
|
||||
pbrpc "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
@@ -31,15 +32,16 @@ var lastHeadRoot [32]byte
|
||||
// New initializes a new fork choice store.
|
||||
func New() *ForkChoice {
|
||||
s := &Store{
|
||||
justifiedCheckpoint: &forkchoicetypes.Checkpoint{},
|
||||
finalizedCheckpoint: &forkchoicetypes.Checkpoint{},
|
||||
proposerBoostRoot: [32]byte{},
|
||||
nodes: make([]*Node, 0),
|
||||
nodesIndices: make(map[[32]byte]uint64),
|
||||
payloadIndices: make(map[[32]byte]uint64),
|
||||
canonicalNodes: make(map[[32]byte]bool),
|
||||
slashedIndices: make(map[types.ValidatorIndex]bool),
|
||||
pruneThreshold: defaultPruneThreshold,
|
||||
justifiedCheckpoint: &forkchoicetypes.Checkpoint{},
|
||||
bestJustifiedCheckpoint: &forkchoicetypes.Checkpoint{},
|
||||
finalizedCheckpoint: &forkchoicetypes.Checkpoint{},
|
||||
proposerBoostRoot: [32]byte{},
|
||||
nodes: make([]*Node, 0),
|
||||
nodesIndices: make(map[[32]byte]uint64),
|
||||
payloadIndices: make(map[[32]byte]uint64),
|
||||
canonicalNodes: make(map[[32]byte]bool),
|
||||
slashedIndices: make(map[types.ValidatorIndex]bool),
|
||||
pruneThreshold: defaultPruneThreshold,
|
||||
}
|
||||
|
||||
b := make([]uint64, 0)
|
||||
@@ -146,7 +148,56 @@ func (f *ForkChoice) InsertNode(ctx context.Context, state state.ReadOnlyBeaconS
|
||||
return errInvalidNilCheckpoint
|
||||
}
|
||||
finalizedEpoch := fc.Epoch
|
||||
return f.store.insert(ctx, slot, root, parentRoot, payloadHash, justifiedEpoch, finalizedEpoch)
|
||||
err := f.store.insert(ctx, slot, root, parentRoot, payloadHash, justifiedEpoch, finalizedEpoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return f.updateCheckpoints(ctx, jc, fc)
|
||||
}
|
||||
|
||||
// updateCheckpoints updates the checkpoints when inserting a new node.
|
||||
func (f *ForkChoice) updateCheckpoints(ctx context.Context, jc, fc *ethpb.Checkpoint) error {
|
||||
f.store.checkpointsLock.Lock()
|
||||
defer f.store.checkpointsLock.Unlock()
|
||||
if jc.Epoch > f.store.justifiedCheckpoint.Epoch {
|
||||
bj := f.store.bestJustifiedCheckpoint
|
||||
if bj == nil || jc.Epoch > bj.Epoch {
|
||||
f.store.bestJustifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch,
|
||||
Root: bytesutil.ToBytes32(jc.Root)}
|
||||
}
|
||||
currentSlot := slots.CurrentSlot(f.store.genesisTime)
|
||||
if slots.SinceEpochStarts(currentSlot) < params.BeaconConfig().SafeSlotsToUpdateJustified {
|
||||
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch,
|
||||
Root: bytesutil.ToBytes32(jc.Root)}
|
||||
} else {
|
||||
currentJcp := f.store.justifiedCheckpoint
|
||||
currentRoot := currentJcp.Root
|
||||
if currentRoot == params.BeaconConfig().ZeroHash {
|
||||
currentRoot = f.store.originRoot
|
||||
}
|
||||
jSlot, err := slots.EpochStart(currentJcp.Epoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
jcRoot := bytesutil.ToBytes32(jc.Root)
|
||||
root, err := f.AncestorRoot(ctx, jcRoot, jSlot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if root == currentRoot {
|
||||
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch,
|
||||
Root: jcRoot}
|
||||
}
|
||||
}
|
||||
}
|
||||
// Update finalization
|
||||
if fc.Epoch > f.store.finalizedCheckpoint.Epoch {
|
||||
f.store.finalizedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: fc.Epoch,
|
||||
Root: bytesutil.ToBytes32(fc.Root)}
|
||||
f.store.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch,
|
||||
Root: bytesutil.ToBytes32(jc.Root)}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Prune prunes the fork choice store with the new finalized root. The store is only pruned if the input
|
||||
@@ -188,7 +239,7 @@ func (f *ForkChoice) IsCanonical(root [32]byte) bool {
|
||||
}
|
||||
|
||||
// AncestorRoot returns the ancestor root of the input block root at a given slot.
|
||||
func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot types.Slot) ([]byte, error) {
|
||||
func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot types.Slot) ([32]byte, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "protoArray.AncestorRoot")
|
||||
defer span.End()
|
||||
|
||||
@@ -197,25 +248,25 @@ func (f *ForkChoice) AncestorRoot(ctx context.Context, root [32]byte, slot types
|
||||
|
||||
i, ok := f.store.nodesIndices[root]
|
||||
if !ok {
|
||||
return nil, errors.New("node does not exist")
|
||||
return [32]byte{}, errors.New("node does not exist")
|
||||
}
|
||||
if i >= uint64(len(f.store.nodes)) {
|
||||
return nil, errors.New("node index out of range")
|
||||
return [32]byte{}, errors.New("node index out of range")
|
||||
}
|
||||
|
||||
for f.store.nodes[i].slot > slot {
|
||||
if ctx.Err() != nil {
|
||||
return nil, ctx.Err()
|
||||
return [32]byte{}, ctx.Err()
|
||||
}
|
||||
|
||||
i = f.store.nodes[i].parent
|
||||
|
||||
if i >= uint64(len(f.store.nodes)) {
|
||||
return nil, errors.New("node index out of range")
|
||||
return [32]byte{}, errors.New("node index out of range")
|
||||
}
|
||||
}
|
||||
|
||||
return f.store.nodes[i].root[:], nil
|
||||
return f.store.nodes[i].root, nil
|
||||
}
|
||||
|
||||
// CommonAncestorRoot returns the common ancestor root between the two block roots r1 and r2.
|
||||
@@ -296,6 +347,9 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "protoArrayForkChoice.head")
|
||||
defer span.End()
|
||||
|
||||
s.checkpointsLock.RLock()
|
||||
defer s.checkpointsLock.RUnlock()
|
||||
|
||||
// Justified index has to be valid in node indices map, and can not be out of bound.
|
||||
if s.justifiedCheckpoint == nil {
|
||||
return [32]byte{}, errInvalidNilCheckpoint
|
||||
@@ -303,7 +357,14 @@ func (s *Store) head(ctx context.Context) ([32]byte, error) {
|
||||
|
||||
justifiedIndex, ok := s.nodesIndices[s.justifiedCheckpoint.Root]
|
||||
if !ok {
|
||||
return [32]byte{}, errUnknownJustifiedRoot
|
||||
// If the justifiedCheckpoint is from genesis, then the root is
|
||||
// zeroHash. In this case it should be the root of the forkchoice
// tree.
|
||||
if s.justifiedCheckpoint.Epoch == params.BeaconConfig().GenesisEpoch {
|
||||
justifiedIndex = uint64(0)
|
||||
} else {
|
||||
return [32]byte{}, errUnknownJustifiedRoot
|
||||
}
|
||||
}
|
||||
if justifiedIndex >= uint64(len(s.nodes)) {
|
||||
return [32]byte{}, errInvalidJustifiedIndex
|
||||
@@ -776,10 +837,10 @@ func (f *ForkChoice) Tips() ([][32]byte, []types.Slot) {
|
||||
return headsRoots, headsSlots
|
||||
}
|
||||
|
||||
func (f *ForkChoice) ForkChoiceNodes() []*pbrpc.ForkChoiceNode {
|
||||
func (f *ForkChoice) ForkChoiceNodes() []*ethpb.ForkChoiceNode {
|
||||
f.store.nodesLock.RLock()
|
||||
defer f.store.nodesLock.RUnlock()
|
||||
ret := make([]*pbrpc.ForkChoiceNode, len(f.store.nodes))
|
||||
ret := make([]*ethpb.ForkChoiceNode, len(f.store.nodes))
|
||||
var parentRoot [32]byte
|
||||
for i, node := range f.store.nodes {
|
||||
root := node.Root()
|
||||
@@ -799,7 +860,7 @@ func (f *ForkChoice) ForkChoiceNodes() []*pbrpc.ForkChoiceNode {
|
||||
bestDescendantRoot = bestDescendantNode.Root()
|
||||
}
|
||||
|
||||
ret[i] = &pbrpc.ForkChoiceNode{
|
||||
ret[i] = ðpb.ForkChoiceNode{
|
||||
Slot: node.Slot(),
|
||||
Root: root[:],
|
||||
Parent: parentRoot[:],
|
||||
@@ -869,6 +930,10 @@ func (f *ForkChoice) UpdateJustifiedCheckpoint(jc *forkchoicetypes.Checkpoint) e
|
||||
f.store.checkpointsLock.Lock()
|
||||
defer f.store.checkpointsLock.Unlock()
|
||||
f.store.justifiedCheckpoint = jc
|
||||
bj := f.store.bestJustifiedCheckpoint
|
||||
if bj == nil || jc.Epoch > bj.Epoch {
|
||||
f.store.bestJustifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: jc.Epoch, Root: jc.Root}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -905,3 +970,13 @@ func (f *ForkChoice) InsertOptimisticChain(ctx context.Context, chain []*forkcho
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// SetGenesisTime sets the genesisTime tracked by forkchoice
|
||||
func (f *ForkChoice) SetGenesisTime(genesisTime uint64) {
|
||||
f.store.genesisTime = genesisTime
|
||||
}
|
||||
|
||||
// SetOriginRoot sets the genesis block root
|
||||
func (f *ForkChoice) SetOriginRoot(root [32]byte) {
|
||||
f.store.originRoot = root
|
||||
}
|
||||
|
||||
@@ -3,13 +3,13 @@ package protoarray
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
|
||||
"github.com/prysmaticlabs/prysm/consensus-types/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
@@ -51,7 +51,7 @@ func TestForkChoice_HasNode(t *testing.T) {
|
||||
|
||||
func TestStore_Head_UnknownJustifiedRoot(t *testing.T) {
|
||||
s := &Store{nodesIndices: make(map[[32]byte]uint64)}
|
||||
s.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 0, Root: [32]byte{'a'}}
|
||||
s.justifiedCheckpoint = &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'a'}}
|
||||
|
||||
_, err := s.head(context.Background())
|
||||
assert.ErrorContains(t, errUnknownJustifiedRoot.Error(), err)
|
||||
@@ -899,10 +899,10 @@ func TestStore_AncestorRoot(t *testing.T) {
|
||||
|
||||
r, err := f.AncestorRoot(ctx, [32]byte{'c'}, 1)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, bytesutil.ToBytes32(r), [32]byte{'a'})
|
||||
assert.Equal(t, r, [32]byte{'a'})
|
||||
r, err = f.AncestorRoot(ctx, [32]byte{'c'}, 2)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, bytesutil.ToBytes32(r), [32]byte{'b'})
|
||||
assert.Equal(t, r, [32]byte{'b'})
|
||||
}
|
||||
|
||||
func TestStore_AncestorRootOutOfBound(t *testing.T) {
|
||||
@@ -1114,3 +1114,145 @@ func TestStore_InsertOptimisticChain(t *testing.T) {
|
||||
f = setup(1, 1)
|
||||
require.NoError(t, f.InsertOptimisticChain(context.Background(), args[2:]))
|
||||
}
|
||||
|
||||
func TestForkChoice_UpdateCheckpoints(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
tests := []struct {
|
||||
name string
|
||||
justified *forkchoicetypes.Checkpoint
|
||||
bestJustified *forkchoicetypes.Checkpoint
|
||||
finalized *forkchoicetypes.Checkpoint
|
||||
newJustified *forkchoicetypes.Checkpoint
|
||||
newFinalized *forkchoicetypes.Checkpoint
|
||||
wantedJustified *forkchoicetypes.Checkpoint
|
||||
wantedBestJustified *forkchoicetypes.Checkpoint
|
||||
wantedFinalized *forkchoicetypes.Checkpoint
|
||||
currentSlot types.Slot
|
||||
wantedErr string
|
||||
}{
|
||||
{
|
||||
name: "lower than store justified and finalized",
|
||||
justified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
|
||||
finalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'f'}},
|
||||
bestJustified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
|
||||
newJustified: &forkchoicetypes.Checkpoint{Epoch: 1},
|
||||
newFinalized: &forkchoicetypes.Checkpoint{Epoch: 0},
|
||||
wantedJustified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
|
||||
wantedBestJustified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
|
||||
wantedFinalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'f'}},
|
||||
},
|
||||
{
|
||||
name: "higher than store justified, early slot, direct descendant",
|
||||
justified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
|
||||
bestJustified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
|
||||
finalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'f'}},
|
||||
newJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'b'}},
|
||||
newFinalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'g'}},
|
||||
wantedJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'b'}},
|
||||
wantedBestJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'b'}},
|
||||
wantedFinalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'f'}},
|
||||
},
|
||||
{
|
||||
name: "higher than store justified, early slot, not a descendant",
|
||||
justified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
|
||||
bestJustified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
|
||||
finalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'f'}},
|
||||
newJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'c'}},
|
||||
newFinalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'g'}},
|
||||
wantedJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'c'}},
|
||||
wantedBestJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'c'}},
|
||||
wantedFinalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'f'}},
|
||||
},
|
||||
{
|
||||
name: "higher than store justified, late slot, descendant",
|
||||
justified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
|
||||
bestJustified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
|
||||
finalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'f'}},
|
||||
newJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'b'}},
|
||||
newFinalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'g'}},
|
||||
wantedJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'b'}},
|
||||
wantedFinalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'f'}},
|
||||
wantedBestJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'b'}},
|
||||
currentSlot: params.BeaconConfig().SafeSlotsToUpdateJustified.Add(1),
|
||||
},
|
||||
{
|
||||
name: "higher than store justified, late slot, not descendant",
|
||||
justified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
|
||||
bestJustified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
|
||||
finalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'f'}},
|
||||
newJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'c'}},
|
||||
newFinalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'g'}},
|
||||
wantedJustified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
|
||||
wantedFinalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'f'}},
|
||||
wantedBestJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'c'}},
|
||||
currentSlot: params.BeaconConfig().SafeSlotsToUpdateJustified.Add(1),
|
||||
},
|
||||
{
|
||||
name: "higher than store finalized, late slot, not descendant",
|
||||
justified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
|
||||
bestJustified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
|
||||
finalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'f'}},
|
||||
newJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'c'}},
|
||||
newFinalized: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'h'}},
|
||||
wantedJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'c'}},
|
||||
wantedFinalized: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'h'}},
|
||||
wantedBestJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'c'}},
|
||||
currentSlot: params.BeaconConfig().SafeSlotsToUpdateJustified.Add(1),
|
||||
},
|
||||
{
|
||||
name: "Unknown checkpoint root, late slot",
|
||||
justified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
|
||||
bestJustified: &forkchoicetypes.Checkpoint{Epoch: 2, Root: [32]byte{'j'}},
|
||||
finalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'f'}},
|
||||
newJustified: &forkchoicetypes.Checkpoint{Epoch: 3, Root: [32]byte{'d'}},
|
||||
newFinalized: &forkchoicetypes.Checkpoint{Epoch: 1, Root: [32]byte{'h'}},
|
||||
currentSlot: params.BeaconConfig().SafeSlotsToUpdateJustified.Add(1),
|
||||
wantedErr: "node does not exist",
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
fcs := setup(tt.justified.Epoch, tt.finalized.Epoch)
|
||||
fcs.store.justifiedCheckpoint = tt.justified
|
||||
fcs.store.finalizedCheckpoint = tt.finalized
|
||||
fcs.store.bestJustifiedCheckpoint = tt.bestJustified
|
||||
fcs.store.genesisTime = uint64(time.Now().Unix()) - uint64(tt.currentSlot)*params.BeaconConfig().SecondsPerSlot
|
||||
|
||||
state, blkRoot, err := prepareForkchoiceState(ctx, 32, [32]byte{'f'},
|
||||
[32]byte{}, [32]byte{}, tt.finalized.Epoch, tt.finalized.Epoch)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 64, [32]byte{'j'},
|
||||
[32]byte{'f'}, [32]byte{}, tt.justified.Epoch, tt.finalized.Epoch)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 96, [32]byte{'b'},
|
||||
[32]byte{'j'}, [32]byte{}, tt.newJustified.Epoch, tt.newFinalized.Epoch)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(ctx, 96, [32]byte{'c'},
|
||||
[32]byte{'f'}, [32]byte{}, tt.newJustified.Epoch, tt.newFinalized.Epoch)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertNode(ctx, state, blkRoot))
|
||||
// Reset the checkpoints because the insertions above modified them.
|
||||
fcs.store.justifiedCheckpoint = tt.justified
|
||||
fcs.store.finalizedCheckpoint = tt.finalized
|
||||
fcs.store.bestJustifiedCheckpoint = tt.bestJustified
|
||||
|
||||
jc := ðpb.Checkpoint{Epoch: tt.newJustified.Epoch, Root: tt.newJustified.Root[:]}
|
||||
fc := ðpb.Checkpoint{Epoch: tt.newFinalized.Epoch, Root: tt.newFinalized.Root[:]}
|
||||
err = fcs.updateCheckpoints(ctx, jc, fc)
|
||||
if len(tt.wantedErr) > 0 {
|
||||
require.ErrorContains(t, tt.wantedErr, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tt.wantedJustified.Epoch, fcs.store.justifiedCheckpoint.Epoch)
|
||||
require.Equal(t, tt.wantedFinalized.Epoch, fcs.store.finalizedCheckpoint.Epoch)
|
||||
require.Equal(t, tt.wantedJustified.Root, fcs.store.justifiedCheckpoint.Root)
|
||||
require.Equal(t, tt.wantedFinalized.Root, fcs.store.finalizedCheckpoint.Root)
|
||||
require.Equal(t, tt.wantedBestJustified.Epoch, fcs.store.bestJustifiedCheckpoint.Epoch)
|
||||
require.Equal(t, tt.wantedBestJustified.Root, fcs.store.bestJustifiedCheckpoint.Root)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
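The genesisTime assignment in the test above encodes the desired current slot; the short illustrative helper below (not in this change, names are assumptions) spells out that arithmetic.

// slotFromGenesis shows the arithmetic the test relies on: with genesisTime
// set to now - currentSlot*SecondsPerSlot, the store observes currentSlot as
// the present slot. Illustrative only.
func slotFromGenesis(now, genesisTime, secondsPerSlot uint64) uint64 {
	return (now - genesisTime) / secondsPerSlot
}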
@@ -20,6 +20,7 @@ type ForkChoice struct {
|
||||
type Store struct {
|
||||
pruneThreshold uint64 // do not prune tree unless threshold is reached.
|
||||
justifiedCheckpoint *forkchoicetypes.Checkpoint // latest justified checkpoint in store.
|
||||
bestJustifiedCheckpoint *forkchoicetypes.Checkpoint // best justified checkpoint in store.
|
||||
finalizedCheckpoint *forkchoicetypes.Checkpoint // latest finalized checkpoint in store.
|
||||
proposerBoostRoot [fieldparams.RootLength]byte // latest block root that was boosted after being received in a timely manner.
|
||||
previousProposerBoostRoot [fieldparams.RootLength]byte // previous block root that was boosted after being received in a timely manner.
|
||||
@@ -29,9 +30,11 @@ type Store struct {
|
||||
canonicalNodes map[[fieldparams.RootLength]byte]bool // the canonical block nodes.
|
||||
payloadIndices map[[fieldparams.RootLength]byte]uint64 // the payload hash of block node and the index in the list
|
||||
slashedIndices map[types.ValidatorIndex]bool // The list of equivocating validators
|
||||
originRoot [fieldparams.RootLength]byte // The genesis block root
|
||||
nodesLock sync.RWMutex
|
||||
proposerBoostLock sync.RWMutex
|
||||
checkpointsLock sync.RWMutex
|
||||
genesisTime uint64
|
||||
}
|
||||
|
||||
// Node defines the individual block which includes its block parent, ancestor and how much weight accounted for it.
|
||||
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
@@ -114,7 +113,7 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(4), r, "Incorrect head for with justified epoch at 1")
|
||||
|
||||
// Insert block 5 with justified epoch 2, it should be filtered out:
|
||||
// Insert block 5 with justified epoch 2, it becomes head
|
||||
// 0
|
||||
// / \
|
||||
// 2 1
|
||||
@@ -130,9 +129,9 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
|
||||
r, err = f.Head(context.Background(), balances)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(4), r, "Incorrect head for with justified epoch at 1")
|
||||
assert.Equal(t, indexToHash(5), r, "Incorrect head for with justified epoch at 1")
|
||||
|
||||
// Insert block 6 with justified epoch 0:
|
||||
// Insert block 6 with justified epoch 3: verify it's head
|
||||
// 0
|
||||
// / \
|
||||
// 2 1
|
||||
@@ -141,28 +140,18 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
// |
|
||||
// 4 <- head
|
||||
// / \
|
||||
// 5 6 <- justified epoch = 0
|
||||
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(6), indexToHash(4), params.BeaconConfig().ZeroHash, 1, 1)
|
||||
// 5 6 <- justified epoch = 3
|
||||
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(6), indexToHash(4), params.BeaconConfig().ZeroHash, 3, 2)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
r, err = f.Head(context.Background(), balances)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(6), r, "Incorrect head for with justified epoch at 1")
|
||||
|
||||
// Moved 2 votes to block 5:
|
||||
// 0
|
||||
// / \
|
||||
// 2 1
|
||||
// |
|
||||
// 3
|
||||
// |
|
||||
// 4
|
||||
// / \
|
||||
// 2 votes-> 5 6
|
||||
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(6), indexToHash(4), params.BeaconConfig().ZeroHash, 1, 1)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
|
||||
f.ProcessAttestation(context.Background(), []uint64{0, 1}, indexToHash(5), 4)
|
||||
|
||||
// Insert blocks 7, 8 and 9:
// Insert blocks 7 and 8
|
||||
// 6 should still be the head, even though 5 has all the votes.
|
||||
// 0
|
||||
// / \
|
||||
@@ -177,23 +166,17 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
// 7
|
||||
// |
|
||||
// 8
|
||||
// |
|
||||
// 9
|
||||
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(7), indexToHash(5), params.BeaconConfig().ZeroHash, 2, 2)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(8), indexToHash(7), params.BeaconConfig().ZeroHash, 2, 2)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(9), indexToHash(8), params.BeaconConfig().ZeroHash, 2, 2)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
|
||||
r, err = f.Head(context.Background(), balances)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(6), r, "Incorrect head for with justified epoch at 1")
|
||||
|
||||
// Update fork choice justified epoch to 1 and start block to 5.
|
||||
// Insert block 9 with justified epoch 3, it becomes head
|
||||
// Verify 9 is the head:
|
||||
// 0
|
||||
// / \
|
||||
@@ -209,17 +192,15 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
// |
|
||||
// 8
|
||||
// |
|
||||
// 9 <- head
|
||||
jc := &forkchoicetypes.Checkpoint{Epoch: 2, Root: indexToHash(5)}
|
||||
fc := &forkchoicetypes.Checkpoint{Epoch: 2, Root: indexToHash(5)}
|
||||
f.store.justifiedCheckpoint = jc
|
||||
f.store.finalizedCheckpoint = fc
|
||||
// 10 <- head
|
||||
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(10), indexToHash(8), params.BeaconConfig().ZeroHash, 3, 2)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
r, err = f.Head(context.Background(), balances)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 2")
|
||||
assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 3")
|
||||
|
||||
// Insert block 10 and 2 validators updated their vote to 9.
|
||||
// Verify 9 is the head:
|
||||
// Insert block 9 forking 10 verify it's head (lexicographic order)
|
||||
// 0
|
||||
// / \
|
||||
// 2 1
|
||||
@@ -234,45 +215,51 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
// |
|
||||
// 8
|
||||
// / \
|
||||
// 2 votes->9 10
|
||||
f.ProcessAttestation(context.Background(), []uint64{0, 1}, indexToHash(9), 5)
|
||||
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(10), indexToHash(8), params.BeaconConfig().ZeroHash, 2, 2)
|
||||
// 9 10
|
||||
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(9), indexToHash(8), params.BeaconConfig().ZeroHash, 3, 2)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
|
||||
r, err = f.Head(context.Background(), balances)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 2")
|
||||
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 3")
|
||||
|
||||
// Move two votes for 10, verify it's head
|
||||
|
||||
f.ProcessAttestation(context.Background(), []uint64{0, 1}, indexToHash(10), 5)
|
||||
r, err = f.Head(context.Background(), balances)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 3")
|
||||
|
||||
// Add 3 more validators to the system.
|
||||
balances = []uint64{1, 1, 1, 1, 1}
|
||||
// The new validators voted for 10.
|
||||
f.ProcessAttestation(context.Background(), []uint64{2, 3, 4}, indexToHash(10), 5)
|
||||
// The new head should be 10.
|
||||
// The new validators voted for 9
|
||||
f.ProcessAttestation(context.Background(), []uint64{2, 3, 4}, indexToHash(9), 5)
|
||||
// The new head should be 9.
|
||||
r, err = f.Head(context.Background(), balances)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 2")
|
||||
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 3")
|
||||
|
||||
// Set the balances of the last 2 validators to 0.
|
||||
balances = []uint64{1, 1, 1, 0, 0}
|
||||
// The head should be back to 9.
|
||||
r, err = f.Head(context.Background(), balances)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 1")
|
||||
|
||||
// Set the balances back to normal.
|
||||
balances = []uint64{1, 1, 1, 1, 1}
|
||||
// The head should be back to 10.
|
||||
r, err = f.Head(context.Background(), balances)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 2")
|
||||
assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 3")
|
||||
|
||||
// Remove the last 2 validators.
|
||||
balances = []uint64{1, 1, 1}
|
||||
// Set the balances back to normal.
|
||||
balances = []uint64{1, 1, 1, 1, 1}
|
||||
// The head should be back to 9.
|
||||
r, err = f.Head(context.Background(), balances)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 1")
|
||||
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 3")
|
||||
|
||||
// Remove the last 2 validators.
|
||||
balances = []uint64{1, 1, 1}
|
||||
// The head should be back to 10.
|
||||
r, err = f.Head(context.Background(), balances)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 3")
|
||||
|
||||
// Verify pruning below the prune threshold does not affect head.
|
||||
f.store.pruneThreshold = 1000
|
||||
@@ -281,7 +268,7 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
|
||||
r, err = f.Head(context.Background(), balances)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 2")
|
||||
assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 3")
|
||||
|
||||
// Verify pruning above the prune threshold does prune:
|
||||
// 0
|
||||
@@ -302,10 +289,12 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
f.store.pruneThreshold = 1
|
||||
require.NoError(t, f.store.prune(context.Background(), indexToHash(5)))
|
||||
assert.Equal(t, 5, len(f.store.nodes), "Incorrect nodes length after prune")
|
||||
// we pruned artificially the justified root.
|
||||
f.store.justifiedCheckpoint.Root = indexToHash(5)
|
||||
|
||||
r, err = f.Head(context.Background(), balances)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(9), r, "Incorrect head for with justified epoch at 2")
|
||||
assert.Equal(t, indexToHash(10), r, "Incorrect head for with justified epoch at 2")
|
||||
|
||||
// Insert new block 11 and verify head is at 11.
|
||||
// 5 6
|
||||
@@ -314,14 +303,14 @@ func TestVotes_CanFindHead(t *testing.T) {
|
||||
// |
|
||||
// 8
|
||||
// / \
|
||||
// 9 10
|
||||
// 10 9
|
||||
// |
|
||||
// head-> 11
|
||||
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(11), indexToHash(9), params.BeaconConfig().ZeroHash, 2, 2)
|
||||
state, blkRoot, err = prepareForkchoiceState(context.Background(), 0, indexToHash(11), indexToHash(10), params.BeaconConfig().ZeroHash, 3, 2)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, f.InsertNode(ctx, state, blkRoot))
|
||||
|
||||
r, err = f.Head(context.Background(), balances)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, indexToHash(11), r, "Incorrect head for with justified epoch at 2")
|
||||
assert.Equal(t, indexToHash(11), r, "Incorrect head for with justified epoch at 3")
|
||||
}
|
||||
|
||||
@@ -131,10 +131,10 @@ func handleGetSSZ(
|
||||
respHasError, errJson := apimiddleware.HandleGrpcResponseError(endpoint.Err, grpcResponse, grpcResponseBody, w)
|
||||
if errJson != nil {
|
||||
apimiddleware.WriteError(w, errJson, nil)
|
||||
return
|
||||
return true
|
||||
}
|
||||
if respHasError {
|
||||
return
|
||||
return true
|
||||
}
|
||||
if errJson := apimiddleware.DeserializeGrpcResponseBodyIntoContainer(grpcResponseBody, config.responseJson); errJson != nil {
|
||||
apimiddleware.WriteError(w, errJson, nil)
|
||||
@@ -191,10 +191,10 @@ func handlePostSSZ(
|
||||
respHasError, errJson := apimiddleware.HandleGrpcResponseError(endpoint.Err, grpcResponse, grpcResponseBody, w)
|
||||
if errJson != nil {
|
||||
apimiddleware.WriteError(w, errJson, nil)
|
||||
return
|
||||
return true
|
||||
}
|
||||
if respHasError {
|
||||
return
|
||||
return true
|
||||
}
|
||||
if errJson := apimiddleware.Cleanup(grpcResponse.Body); errJson != nil {
|
||||
apimiddleware.WriteError(w, errJson, nil)
|
||||
|
||||
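Both hunks above make the SSZ handlers report, through their boolean result, that a response has already been written. A hedged sketch of that pattern with simplified names follows; the real handler signatures are not shown in these hunks, and the helper is hypothetical.

// handleGrpcError returns true whenever the gRPC response error has been
// dealt with, so the calling handler can stop instead of falling through.
// Hypothetical helper for illustration only.
func handleGrpcError(w http.ResponseWriter, respHasError bool, errJson apimiddleware.ErrorJson) (handled bool) {
	if errJson != nil {
		apimiddleware.WriteError(w, errJson, nil)
		return true
	}
	return respHasError
}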
@@ -12,12 +12,10 @@ import (
|
||||
// Requests and responses.
|
||||
//----------------
|
||||
|
||||
// genesisResponseJson is used in /beacon/genesis API endpoint.
|
||||
type genesisResponseJson struct {
|
||||
Data *genesisResponse_GenesisJson `json:"data"`
|
||||
}
|
||||
|
||||
// genesisResponse_GenesisJson is used in /beacon/genesis API endpoint.
|
||||
type genesisResponse_GenesisJson struct {
|
||||
GenesisTime string `json:"genesis_time" time:"true"`
|
||||
GenesisValidatorsRoot string `json:"genesis_validators_root" hex:"true"`
|
||||
@@ -33,158 +31,130 @@ type WeakSubjectivityResponse struct {
|
||||
} `json:"data"`
|
||||
}
|
||||
|
||||
// feeRecipientsRequestJson is used in /validator/prepare_beacon_proposers API endpoint.
|
||||
type feeRecipientsRequestJSON struct {
|
||||
Recipients []*feeRecipientJson `json:"recipients"`
|
||||
}
|
||||
|
||||
// stateRootResponseJson is used in /beacon/states/{state_id}/root API endpoint.
|
||||
type stateRootResponseJson struct {
|
||||
Data *stateRootResponse_StateRootJson `json:"data"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
}
|
||||
|
||||
// stateRootResponse_StateRootJson is used in /beacon/states/{state_id}/root API endpoint.
|
||||
type stateRootResponse_StateRootJson struct {
|
||||
StateRoot string `json:"root" hex:"true"`
|
||||
}
|
||||
|
||||
// stateForkResponseJson is used in /beacon/states/{state_id}/fork API endpoint.
|
||||
type stateForkResponseJson struct {
|
||||
Data *forkJson `json:"data"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
}
|
||||
|
||||
// stateFinalityCheckpointResponseJson is used in /beacon/states/{state_id}/finality_checkpoints API endpoint.
|
||||
type stateFinalityCheckpointResponseJson struct {
|
||||
Data *stateFinalityCheckpointResponse_StateFinalityCheckpointJson `json:"data"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
}
|
||||
|
||||
// stateFinalityCheckpointResponse_StateFinalityCheckpointJson is used in /beacon/states/{state_id}/finality_checkpoints API endpoint.
|
||||
type stateFinalityCheckpointResponse_StateFinalityCheckpointJson struct {
|
||||
PreviousJustified *checkpointJson `json:"previous_justified"`
|
||||
CurrentJustified *checkpointJson `json:"current_justified"`
|
||||
Finalized *checkpointJson `json:"finalized"`
|
||||
}
|
||||
|
||||
// stateValidatorResponseJson is used in /beacon/states/{state_id}/validators API endpoint.
|
||||
type stateValidatorsResponseJson struct {
|
||||
Data []*validatorContainerJson `json:"data"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
}
|
||||
|
||||
// stateValidatorResponseJson is used in /beacon/states/{state_id}/validators/{validator_id} API endpoint.
|
||||
type stateValidatorResponseJson struct {
|
||||
Data *validatorContainerJson `json:"data"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
}
|
||||
|
||||
// validatorBalancesResponseJson is used in /beacon/states/{state_id}/validator_balances API endpoint.
|
||||
type validatorBalancesResponseJson struct {
|
||||
Data []*validatorBalanceJson `json:"data"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
}
|
||||
|
||||
// stateCommitteesResponseJson is used in /beacon/states/{state_id}/committees API endpoint.
|
||||
type stateCommitteesResponseJson struct {
|
||||
Data []*committeeJson `json:"data"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
}
|
||||
|
||||
// syncCommitteesResponseJson is used in /beacon/states/{state_id}/sync_committees API endpoint.
|
||||
type syncCommitteesResponseJson struct {
|
||||
Data *syncCommitteeValidatorsJson `json:"data"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
}
|
||||
|
||||
// blockHeadersResponseJson is used in /beacon/headers API endpoint.
|
||||
type blockHeadersResponseJson struct {
|
||||
Data []*blockHeaderContainerJson `json:"data"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
}
|
||||
|
||||
// blockHeaderResponseJson is used in /beacon/headers/{block_id} API endpoint.
|
||||
type blockHeaderResponseJson struct {
|
||||
Data *blockHeaderContainerJson `json:"data"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
}
|
||||
|
||||
// blockResponseJson is used in /beacon/blocks/{block_id} API endpoint.
|
||||
type blockResponseJson struct {
|
||||
Data *signedBeaconBlockContainerJson `json:"data"`
|
||||
}
|
||||
|
||||
// blockV2ResponseJson is used in /v2/beacon/blocks/{block_id} API endpoint.
|
||||
type blockV2ResponseJson struct {
|
||||
Version string `json:"version" enum:"true"`
|
||||
Data *signedBeaconBlockContainerV2Json `json:"data"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
}
|
||||
|
||||
// blockRootResponseJson is used in /beacon/blocks/{block_id}/root API endpoint.
|
||||
type blockRootResponseJson struct {
|
||||
Data *blockRootContainerJson `json:"data"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
}
|
||||
|
||||
// blockAttestationsResponseJson is used in /beacon/blocks/{block_id}/attestations API endpoint.
|
||||
type blockAttestationsResponseJson struct {
|
||||
Data []*attestationJson `json:"data"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
}
|
||||
|
||||
// attestationsPoolResponseJson is used in /beacon/pool/attestations GET API endpoint.
|
||||
type attestationsPoolResponseJson struct {
|
||||
Data []*attestationJson `json:"data"`
|
||||
}
|
||||
|
||||
// submitAttestationRequestJson is used in /beacon/pool/attestations POST API endpoint.
|
||||
type submitAttestationRequestJson struct {
|
||||
Data []*attestationJson `json:"data"`
|
||||
}
|
||||
|
||||
// attesterSlashingsPoolResponseJson is used in /beacon/pool/attester_slashings API endpoint.
|
||||
type attesterSlashingsPoolResponseJson struct {
|
||||
Data []*attesterSlashingJson `json:"data"`
|
||||
}
|
||||
|
||||
// proposerSlashingsPoolResponseJson is used in /beacon/pool/proposer_slashings API endpoint.
|
||||
type proposerSlashingsPoolResponseJson struct {
|
||||
Data []*proposerSlashingJson `json:"data"`
|
||||
}
|
||||
|
||||
// voluntaryExitsPoolResponseJson is used in /beacon/pool/voluntary_exits API endpoint.
|
||||
type voluntaryExitsPoolResponseJson struct {
|
||||
Data []*signedVoluntaryExitJson `json:"data"`
|
||||
}
|
||||
|
||||
// submitSyncCommitteeSignaturesRequestJson is used in /beacon/pool/sync_committees API endpoint.
|
||||
type submitSyncCommitteeSignaturesRequestJson struct {
|
||||
Data []*syncCommitteeMessageJson `json:"data"`
|
||||
}
|
||||
|
||||
// identityResponseJson is used in /node/identity API endpoint.
|
||||
type identityResponseJson struct {
|
||||
Data *identityJson `json:"data"`
|
||||
}
|
||||
|
||||
// peersResponseJson is used in /node/peers API endpoint.
|
||||
type peersResponseJson struct {
|
||||
Data []*peerJson `json:"data"`
|
||||
}
|
||||
|
||||
// peerResponseJson is used in /node/peers/{peer_id} API endpoint.
|
||||
type peerResponseJson struct {
|
||||
Data *peerJson `json:"data"`
|
||||
}
|
||||
|
||||
// peerCountResponseJson is used in /node/peer_count API endpoint.
|
||||
type peerCountResponseJson struct {
|
||||
Data peerCountResponse_PeerCountJson `json:"data"`
|
||||
}
|
||||
|
||||
// peerCountResponse_PeerCountJson is used in /node/peer_count API endpoint.
|
||||
type peerCountResponse_PeerCountJson struct {
|
||||
Disconnected string `json:"disconnected"`
|
||||
Connecting string `json:"connecting"`
|
||||
@@ -192,111 +162,91 @@ type peerCountResponse_PeerCountJson struct {
|
||||
Disconnecting string `json:"disconnecting"`
|
||||
}
|
||||
|
||||
// versionResponseJson is used in /node/version API endpoint.
|
||||
type versionResponseJson struct {
|
||||
Data *versionJson `json:"data"`
|
||||
}
|
||||
|
||||
// syncingResponseJson is used in /node/syncing API endpoint.
|
||||
type syncingResponseJson struct {
|
||||
Data *helpers.SyncDetailsJson `json:"data"`
|
||||
}
|
||||
|
||||
// beaconStateResponseJson is used in /debug/beacon/states/{state_id} API endpoint.
|
||||
type beaconStateResponseJson struct {
|
||||
Data *beaconStateJson `json:"data"`
|
||||
}
|
||||
|
||||
// beaconStateV2ResponseJson is used in /v2/debug/beacon/states/{state_id} API endpoint.
|
||||
type beaconStateV2ResponseJson struct {
|
||||
Version string `json:"version" enum:"true"`
|
||||
Data *beaconStateContainerV2Json `json:"data"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
}
|
||||
|
||||
// forkChoiceHeadsResponseJson is used in /v1/debug/beacon/heads API endpoint.
|
||||
type forkChoiceHeadsResponseJson struct {
|
||||
Data []*forkChoiceHeadJson `json:"data"`
|
||||
}
|
||||
|
||||
// v2ForkChoiceHeadsResponseJson is used in /v2/debug/beacon/heads API endpoint.
|
||||
type v2ForkChoiceHeadsResponseJson struct {
|
||||
Data []*v2ForkChoiceHeadJson `json:"data"`
|
||||
}
|
||||
|
||||
// forkScheduleResponseJson is used in /config/fork_schedule API endpoint.
|
||||
type forkScheduleResponseJson struct {
|
||||
Data []*forkJson `json:"data"`
|
||||
}
|
||||
|
||||
// depositContractResponseJson is used in /config/deposit_contract API endpoint.
|
||||
type depositContractResponseJson struct {
|
||||
Data *depositContractJson `json:"data"`
|
||||
}
|
||||
|
||||
// specResponseJson is used in /config/spec API endpoint.
|
||||
type specResponseJson struct {
|
||||
Data interface{} `json:"data"`
|
||||
}
|
||||
|
||||
// dutiesRequestJson is used in several duties-related API endpoints.
|
||||
type dutiesRequestJson struct {
|
||||
Index []string `json:"index"`
|
||||
}
|
||||
|
||||
// attesterDutiesResponseJson is used in /validator/duties/attester/{epoch} API endpoint.
|
||||
type attesterDutiesResponseJson struct {
|
||||
DependentRoot string `json:"dependent_root" hex:"true"`
|
||||
Data []*attesterDutyJson `json:"data"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
}
|
||||
|
||||
// proposerDutiesResponseJson is used in /validator/duties/proposer/{epoch} API endpoint.
|
||||
type proposerDutiesResponseJson struct {
|
||||
DependentRoot string `json:"dependent_root" hex:"true"`
|
||||
Data []*proposerDutyJson `json:"data"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
}
|
||||
|
||||
// syncCommitteeDutiesResponseJson is used in /validator/duties/sync/{epoch} API endpoint.
|
||||
type syncCommitteeDutiesResponseJson struct {
|
||||
Data []*syncCommitteeDuty `json:"data"`
|
||||
ExecutionOptimistic bool `json:"execution_optimistic"`
|
||||
}
|
||||
|
||||
// produceBlockResponseJson is used in /validator/blocks/{slot} API endpoint.
|
||||
type produceBlockResponseJson struct {
|
||||
Data *beaconBlockJson `json:"data"`
|
||||
}
|
||||
|
||||
// produceBlockResponseV2Json is used in /v2/validator/blocks/{slot} API endpoint.
|
||||
type produceBlockResponseV2Json struct {
|
||||
Version string `json:"version"`
|
||||
Data *beaconBlockContainerV2Json `json:"data"`
|
||||
}
|
||||
|
||||
// produceBlindedBlockResponseJson is used in /v1/validator/blinded_blocks/{slot} API endpoint.
|
||||
type produceBlindedBlockResponseJson struct {
|
||||
Version string `json:"version"`
|
||||
Data *blindedBeaconBlockContainerJson `json:"data"`
|
||||
}
|
||||
|
||||
// produceAttestationDataResponseJson is used in /validator/attestation_data API endpoint.
|
||||
type produceAttestationDataResponseJson struct {
|
||||
Data *attestationDataJson `json:"data"`
|
||||
}
|
||||
|
||||
// aggregateAttestationResponseJson is used in /validator/aggregate_attestation API endpoint.
|
||||
type aggregateAttestationResponseJson struct {
|
||||
Data *attestationJson `json:"data"`
|
||||
}
|
||||
|
||||
// submitBeaconCommitteeSubscriptionsRequestJson is used in /validator/beacon_committee_subscriptions API endpoint.
|
||||
type submitBeaconCommitteeSubscriptionsRequestJson struct {
|
||||
Data []*beaconCommitteeSubscribeJson `json:"data"`
|
||||
}
|
||||
|
||||
// beaconCommitteeSubscribeJson is used in /validator/beacon_committee_subscriptions API endpoint.
|
||||
type beaconCommitteeSubscribeJson struct {
|
||||
ValidatorIndex string `json:"validator_index"`
|
||||
CommitteeIndex string `json:"committee_index"`
|
||||
@@ -305,29 +255,24 @@ type beaconCommitteeSubscribeJson struct {
|
||||
IsAggregator bool `json:"is_aggregator"`
|
||||
}
|
||||
|
||||
// submitBeaconCommitteeSubscriptionsRequestJson is used in /validator/sync_committee_subscriptions API endpoint.
|
||||
type submitSyncCommitteeSubscriptionRequestJson struct {
|
||||
Data []*syncCommitteeSubscriptionJson `json:"data"`
|
||||
}
|
||||
|
||||
// syncCommitteeSubscriptionJson is used in /validator/sync_committee_subscriptions API endpoint.
|
||||
type syncCommitteeSubscriptionJson struct {
|
||||
ValidatorIndex string `json:"validator_index"`
|
||||
SyncCommitteeIndices []string `json:"sync_committee_indices"`
|
||||
UntilEpoch string `json:"until_epoch"`
|
||||
}
|
||||
|
||||
// submitAggregateAndProofsRequestJson is used in /validator/aggregate_and_proofs API endpoint.
|
||||
type submitAggregateAndProofsRequestJson struct {
|
||||
Data []*signedAggregateAttestationAndProofJson `json:"data"`
|
||||
}
|
||||
|
||||
// produceSyncCommitteeContributionResponseJson is used in /validator/sync_committee_contribution API endpoint.
|
||||
type produceSyncCommitteeContributionResponseJson struct {
|
||||
Data *syncCommitteeContributionJson `json:"data"`
|
||||
}
|
||||
|
||||
// submitContributionAndProofsRequestJson is used in /validator/contribution_and_proofs API endpoint.
|
||||
type submitContributionAndProofsRequestJson struct {
|
||||
Data []*signedContributionAndProofJson `json:"data"`
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@ go_library(
|
||||
"backup.go",
|
||||
"delete.go",
|
||||
"exit.go",
|
||||
"import.go",
|
||||
"list.go",
|
||||
"wallet_utils.go",
|
||||
],
|
||||
@@ -36,6 +37,7 @@ go_test(
|
||||
"backup_test.go",
|
||||
"delete_test.go",
|
||||
"exit_test.go",
|
||||
"import_test.go",
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
|
||||
@@ -7,7 +7,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/cmd/validator/flags"
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
"github.com/prysmaticlabs/prysm/runtime/tos"
|
||||
"github.com/prysmaticlabs/prysm/validator/accounts"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
@@ -139,13 +138,13 @@ var Commands = &cli.Command{
|
||||
if err := cmd.LoadFlagsFromConfig(cliCtx, cliCtx.Command.Flags); err != nil {
|
||||
return err
|
||||
}
|
||||
return tos.VerifyTosAcceptedOrPrompt(cliCtx)
|
||||
},
|
||||
Action: func(cliCtx *cli.Context) error {
|
||||
if err := features.ConfigureValidator(cliCtx); err != nil {
|
||||
if err := tos.VerifyTosAcceptedOrPrompt(cliCtx); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := accounts.ImportAccountsCli(cliCtx); err != nil {
|
||||
return features.ConfigureValidator(cliCtx)
|
||||
},
|
||||
Action: func(cliCtx *cli.Context) error {
|
||||
if err := accountsImport(cliCtx); err != nil {
|
||||
log.Fatalf("Could not import accounts: %v", err)
|
||||
}
|
||||
return nil
|
||||
|
||||
@@ -181,7 +181,7 @@ func TestBackupAccounts_Noninteractive_Imported(t *testing.T) {
|
||||
|
||||
// We attempt to import accounts we wrote to the keys directory
|
||||
// into our newly created wallet.
|
||||
require.NoError(t, accounts.ImportAccountsCli(cliCtx))
|
||||
require.NoError(t, accountsImport(cliCtx))
|
||||
|
||||
// Next, we attempt to backup the accounts.
|
||||
require.NoError(t, accountsBackup(cliCtx))
|
||||
|
||||
@@ -83,6 +83,7 @@ type testWalletConfig struct {
|
||||
deletePublicKeys string
|
||||
keysDir string
|
||||
backupDir string
|
||||
passwordsDir string
|
||||
walletDir string
|
||||
}
|
||||
|
||||
@@ -169,7 +170,7 @@ func TestDeleteAccounts_Noninteractive(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
// We attempt to import accounts.
|
||||
require.NoError(t, accounts.ImportAccountsCli(cliCtx))
|
||||
require.NoError(t, accountsImport(cliCtx))
|
||||
|
||||
// We attempt to delete the accounts specified.
|
||||
require.NoError(t, accountsDelete(cliCtx))
|
||||
|
||||
@@ -75,7 +75,7 @@ func TestExitAccountsCli_OK(t *testing.T) {
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, accounts.ImportAccountsCli(cliCtx))
|
||||
require.NoError(t, accountsImport(cliCtx))
|
||||
|
||||
_, keymanager, err := walletWithKeymanager(cliCtx)
|
||||
require.NoError(t, err)
|
||||
@@ -175,7 +175,7 @@ func TestExitAccountsCli_OK_AllPublicKeys(t *testing.T) {
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, accounts.ImportAccountsCli(cliCtx))
|
||||
require.NoError(t, accountsImport(cliCtx))
|
||||
|
||||
_, keymanager, err := walletWithKeymanager(cliCtx)
|
||||
require.NoError(t, err)
|
||||
|
||||
113
cmd/validator/accounts/import.go
Normal file
@@ -0,0 +1,113 @@
|
||||
package accounts
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/cmd"
|
||||
"github.com/prysmaticlabs/prysm/cmd/validator/flags"
|
||||
"github.com/prysmaticlabs/prysm/validator/accounts"
|
||||
"github.com/prysmaticlabs/prysm/validator/accounts/iface"
|
||||
"github.com/prysmaticlabs/prysm/validator/accounts/userprompt"
|
||||
"github.com/prysmaticlabs/prysm/validator/accounts/wallet"
|
||||
"github.com/prysmaticlabs/prysm/validator/client"
|
||||
"github.com/prysmaticlabs/prysm/validator/keymanager"
|
||||
"github.com/urfave/cli/v2"
|
||||
)
|
||||
|
||||
func accountsImport(c *cli.Context) error {
|
||||
w, err := walletImport(c)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not initialize wallet")
|
||||
}
|
||||
km, err := w.InitializeKeymanager(c.Context, iface.InitKeymanagerConfig{ListenForChanges: false})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
dialOpts := client.ConstructDialOptions(
|
||||
c.Int(cmd.GrpcMaxCallRecvMsgSizeFlag.Name),
|
||||
c.String(flags.CertFlag.Name),
|
||||
c.Uint(flags.GrpcRetriesFlag.Name),
|
||||
c.Duration(flags.GrpcRetryDelayFlag.Name),
|
||||
)
|
||||
grpcHeaders := strings.Split(c.String(flags.GrpcHeadersFlag.Name), ",")
|
||||
|
||||
opts := []accounts.Option{
|
||||
accounts.WithWallet(w),
|
||||
accounts.WithKeymanager(km),
|
||||
accounts.WithGRPCDialOpts(dialOpts),
|
||||
accounts.WithBeaconRPCProvider(c.String(flags.BeaconRPCProviderFlag.Name)),
|
||||
accounts.WithGRPCHeaders(grpcHeaders),
|
||||
}
|
||||
|
||||
opts = append(opts, accounts.WithImportPrivateKeys(c.IsSet(flags.ImportPrivateKeyFileFlag.Name)))
|
||||
opts = append(opts, accounts.WithPrivateKeyFile(c.String(flags.ImportPrivateKeyFileFlag.Name)))
|
||||
opts = append(opts, accounts.WithReadPasswordFile(c.IsSet(flags.AccountPasswordFileFlag.Name)))
|
||||
opts = append(opts, accounts.WithPasswordFilePath(c.String(flags.AccountPasswordFileFlag.Name)))
|
||||
|
||||
keysDir, err := userprompt.InputDirectory(c, userprompt.ImportKeysDirPromptText, flags.KeysDirFlag)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not parse keys directory")
|
||||
}
|
||||
opts = append(opts, accounts.WithKeysDir(keysDir))
|
||||
|
||||
acc, err := accounts.NewCLIManager(opts...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
return acc.Import(c.Context)
|
||||
}
|
||||
|
||||
func walletImport(c *cli.Context) (*wallet.Wallet, error) {
|
||||
return wallet.OpenWalletOrElseCli(c, func(cliCtx *cli.Context) (*wallet.Wallet, error) {
|
||||
walletDir, err := userprompt.InputDirectory(cliCtx, userprompt.WalletDirPromptText, flags.WalletDirFlag)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
exists, err := wallet.Exists(walletDir)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, wallet.CheckExistsErrMsg)
|
||||
}
|
||||
if exists {
|
||||
isValid, err := wallet.IsValid(walletDir)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, wallet.CheckValidityErrMsg)
|
||||
}
|
||||
if !isValid {
|
||||
return nil, errors.New(wallet.InvalidWalletErrMsg)
|
||||
}
|
||||
walletPassword, err := wallet.InputPassword(
|
||||
cliCtx,
|
||||
flags.WalletPasswordFileFlag,
|
||||
wallet.PasswordPromptText,
|
||||
false, /* Do not confirm password */
|
||||
wallet.ValidateExistingPass,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return wallet.OpenWallet(cliCtx.Context, &wallet.Config{
|
||||
WalletDir: walletDir,
|
||||
WalletPassword: walletPassword,
|
||||
})
|
||||
}
|
||||
|
||||
cfg, err := accounts.ExtractWalletCreationConfigFromCli(cliCtx, keymanager.Local)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
w := wallet.New(&wallet.Config{
|
||||
KeymanagerKind: cfg.WalletCfg.KeymanagerKind,
|
||||
WalletDir: cfg.WalletCfg.WalletDir,
|
||||
WalletPassword: cfg.WalletCfg.WalletPassword,
|
||||
})
|
||||
if err = accounts.CreateLocalKeymanagerWallet(cliCtx.Context, w); err != nil {
|
||||
return nil, errors.Wrap(err, "could not create keymanager")
|
||||
}
|
||||
log.WithField("wallet-path", cfg.WalletCfg.WalletDir).Info(
|
||||
"Successfully created new wallet",
|
||||
)
|
||||
return w, nil
|
||||
})
|
||||
}
|
||||
262
cmd/validator/accounts/import_test.go
Normal file
@@ -0,0 +1,262 @@
|
||||
package accounts
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/prysmaticlabs/prysm/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/validator/accounts"
|
||||
"github.com/prysmaticlabs/prysm/validator/accounts/iface"
|
||||
"github.com/prysmaticlabs/prysm/validator/accounts/wallet"
|
||||
"github.com/prysmaticlabs/prysm/validator/keymanager"
|
||||
"github.com/prysmaticlabs/prysm/validator/keymanager/local"
|
||||
keystorev4 "github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4"
|
||||
)
|
||||
|
||||
func TestImport_Noninteractive(t *testing.T) {
|
||||
local.ResetCaches()
|
||||
walletDir, passwordsDir, passwordFilePath := setupWalletAndPasswordsDir(t)
|
||||
keysDir := filepath.Join(t.TempDir(), "keysDir")
|
||||
require.NoError(t, os.MkdirAll(keysDir, os.ModePerm))
|
||||
|
||||
cliCtx := setupWalletCtx(t, &testWalletConfig{
|
||||
walletDir: walletDir,
|
||||
passwordsDir: passwordsDir,
|
||||
keysDir: keysDir,
|
||||
keymanagerKind: keymanager.Local,
|
||||
walletPasswordFile: passwordFilePath,
|
||||
accountPasswordFile: passwordFilePath,
|
||||
})
|
||||
w, err := accounts.CreateWalletWithKeymanager(cliCtx.Context, &accounts.CreateWalletConfig{
|
||||
WalletCfg: &wallet.Config{
|
||||
WalletDir: walletDir,
|
||||
KeymanagerKind: keymanager.Local,
|
||||
WalletPassword: password,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
keymanager, err := local.NewKeymanager(
|
||||
cliCtx.Context,
|
||||
&local.SetupConfig{
|
||||
Wallet: w,
|
||||
ListenForChanges: false,
|
||||
},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Make sure there are no accounts at the start.
|
||||
accounts, err := keymanager.ValidatingAccountNames()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, len(accounts), 0)
|
||||
|
||||
// Create 2 keys.
|
||||
createKeystore(t, keysDir)
|
||||
time.Sleep(time.Second)
|
||||
createKeystore(t, keysDir)
|
||||
|
||||
require.NoError(t, accountsImport(cliCtx))
|
||||
|
||||
w, err = wallet.OpenWallet(cliCtx.Context, &wallet.Config{
|
||||
WalletDir: walletDir,
|
||||
WalletPassword: password,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
km, err := w.InitializeKeymanager(cliCtx.Context, iface.InitKeymanagerConfig{ListenForChanges: false})
|
||||
require.NoError(t, err)
|
||||
keys, err := km.FetchValidatingPublicKeys(cliCtx.Context)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, 2, len(keys))
|
||||
}
|
||||
|
||||
// TestImport_DuplicateKeys is a regression test that ensures correct behavior when duplicate keys are imported
|
||||
func TestImport_DuplicateKeys(t *testing.T) {
|
||||
local.ResetCaches()
|
||||
walletDir, passwordsDir, passwordFilePath := setupWalletAndPasswordsDir(t)
|
||||
keysDir := filepath.Join(t.TempDir(), "keysDir")
|
||||
require.NoError(t, os.MkdirAll(keysDir, os.ModePerm))
|
||||
|
||||
cliCtx := setupWalletCtx(t, &testWalletConfig{
|
||||
walletDir: walletDir,
|
||||
passwordsDir: passwordsDir,
|
||||
keysDir: keysDir,
|
||||
keymanagerKind: keymanager.Local,
|
||||
walletPasswordFile: passwordFilePath,
|
||||
accountPasswordFile: passwordFilePath,
|
||||
})
|
||||
w, err := accounts.CreateWalletWithKeymanager(cliCtx.Context, &accounts.CreateWalletConfig{
|
||||
WalletCfg: &wallet.Config{
|
||||
WalletDir: walletDir,
|
||||
KeymanagerKind: keymanager.Local,
|
||||
WalletPassword: password,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create a key and then copy it to create a duplicate
|
||||
_, keystorePath := createKeystore(t, keysDir)
|
||||
time.Sleep(time.Second)
|
||||
input, err := os.ReadFile(keystorePath)
|
||||
require.NoError(t, err)
|
||||
keystorePath2 := filepath.Join(keysDir, "copyOfKeystore.json")
|
||||
err = os.WriteFile(keystorePath2, input, os.ModePerm)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, accountsImport(cliCtx))
|
||||
|
||||
_, err = wallet.OpenWallet(cliCtx.Context, &wallet.Config{
|
||||
WalletDir: walletDir,
|
||||
WalletPassword: password,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
km, err := w.InitializeKeymanager(cliCtx.Context, iface.InitKeymanagerConfig{ListenForChanges: false})
|
||||
require.NoError(t, err)
|
||||
keys, err := km.FetchValidatingPublicKeys(cliCtx.Context)
|
||||
require.NoError(t, err)
|
||||
|
||||
// There should only be 1 account as the duplicate keystore was ignored
|
||||
assert.Equal(t, 1, len(keys))
|
||||
}
|
||||
|
||||
func TestImport_Noninteractive_RandomName(t *testing.T) {
|
||||
local.ResetCaches()
|
||||
walletDir, passwordsDir, passwordFilePath := setupWalletAndPasswordsDir(t)
|
||||
keysDir := filepath.Join(t.TempDir(), "keysDir")
|
||||
require.NoError(t, os.MkdirAll(keysDir, os.ModePerm))
|
||||
|
||||
cliCtx := setupWalletCtx(t, &testWalletConfig{
|
||||
walletDir: walletDir,
|
||||
passwordsDir: passwordsDir,
|
||||
keysDir: keysDir,
|
||||
keymanagerKind: keymanager.Local,
|
||||
walletPasswordFile: passwordFilePath,
|
||||
accountPasswordFile: passwordFilePath,
|
||||
})
|
||||
w, err := accounts.CreateWalletWithKeymanager(cliCtx.Context, &accounts.CreateWalletConfig{
|
||||
WalletCfg: &wallet.Config{
|
||||
WalletDir: walletDir,
|
||||
KeymanagerKind: keymanager.Local,
|
||||
WalletPassword: password,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
keymanager, err := local.NewKeymanager(
|
||||
cliCtx.Context,
|
||||
&local.SetupConfig{
|
||||
Wallet: w,
|
||||
ListenForChanges: false,
|
||||
},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Make sure there are no accounts at the start.
|
||||
accounts, err := keymanager.ValidatingAccountNames()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, len(accounts), 0)
|
||||
|
||||
// Create 2 keys.
|
||||
createRandomNameKeystore(t, keysDir)
|
||||
time.Sleep(time.Second)
|
||||
createRandomNameKeystore(t, keysDir)
|
||||
|
||||
require.NoError(t, accountsImport(cliCtx))
|
||||
|
||||
w, err = wallet.OpenWallet(cliCtx.Context, &wallet.Config{
|
||||
WalletDir: walletDir,
|
||||
WalletPassword: password,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
km, err := w.InitializeKeymanager(cliCtx.Context, iface.InitKeymanagerConfig{ListenForChanges: false})
|
||||
require.NoError(t, err)
|
||||
keys, err := km.FetchValidatingPublicKeys(cliCtx.Context)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, 2, len(keys))
|
||||
}
|
||||
|
||||
// createRandomNameKeystore creates a keystore with a randomized file name and returns it along with the full path to the keystore file.
|
||||
func createRandomNameKeystore(t *testing.T, path string) (*keymanager.Keystore, string) {
|
||||
validatingKey, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
encryptor := keystorev4.New()
|
||||
cryptoFields, err := encryptor.Encrypt(validatingKey.Marshal(), password)
|
||||
require.NoError(t, err)
|
||||
id, err := uuid.NewRandom()
|
||||
require.NoError(t, err)
|
||||
keystoreFile := &keymanager.Keystore{
|
||||
Crypto: cryptoFields,
|
||||
ID: id.String(),
|
||||
Pubkey: fmt.Sprintf("%x", validatingKey.PublicKey().Marshal()),
|
||||
Version: encryptor.Version(),
|
||||
Name: encryptor.Name(),
|
||||
}
|
||||
encoded, err := json.MarshalIndent(keystoreFile, "", "\t")
|
||||
require.NoError(t, err)
|
||||
// Write the encoded keystore to disk with a random number appended to the file name
|
||||
random, err := rand.Int(rand.Reader, big.NewInt(1000000))
|
||||
require.NoError(t, err)
|
||||
fullPath := filepath.Join(path, fmt.Sprintf("test-%d-keystore", random.Int64()))
|
||||
require.NoError(t, os.WriteFile(fullPath, encoded, os.ModePerm))
|
||||
return keystoreFile, fullPath
|
||||
}
|
||||
|
||||
func TestImport_Noninteractive_Filepath(t *testing.T) {
|
||||
local.ResetCaches()
|
||||
walletDir, passwordsDir, passwordFilePath := setupWalletAndPasswordsDir(t)
|
||||
keysDir := filepath.Join(t.TempDir(), "keysDir")
|
||||
require.NoError(t, os.MkdirAll(keysDir, os.ModePerm))
|
||||
|
||||
_, keystorePath := createKeystore(t, keysDir)
|
||||
cliCtx := setupWalletCtx(t, &testWalletConfig{
|
||||
walletDir: walletDir,
|
||||
passwordsDir: passwordsDir,
|
||||
keysDir: keystorePath,
|
||||
keymanagerKind: keymanager.Local,
|
||||
walletPasswordFile: passwordFilePath,
|
||||
accountPasswordFile: passwordFilePath,
|
||||
})
|
||||
w, err := accounts.CreateWalletWithKeymanager(cliCtx.Context, &accounts.CreateWalletConfig{
|
||||
WalletCfg: &wallet.Config{
|
||||
WalletDir: walletDir,
|
||||
KeymanagerKind: keymanager.Local,
|
||||
WalletPassword: password,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
keymanager, err := local.NewKeymanager(
|
||||
cliCtx.Context,
|
||||
&local.SetupConfig{
|
||||
Wallet: w,
|
||||
ListenForChanges: false,
|
||||
},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Make sure there are no accounts at the start.
|
||||
accounts, err := keymanager.ValidatingAccountNames()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, len(accounts), 0)
|
||||
|
||||
require.NoError(t, accountsImport(cliCtx))
|
||||
|
||||
w, err = wallet.OpenWallet(cliCtx.Context, &wallet.Config{
|
||||
WalletDir: walletDir,
|
||||
WalletPassword: password,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
km, err := w.InitializeKeymanager(cliCtx.Context, iface.InitKeymanagerConfig{ListenForChanges: false})
|
||||
require.NoError(t, err)
|
||||
keys, err := km.FetchValidatingPublicKeys(cliCtx.Context)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, 1, len(keys))
|
||||
}
|
||||
@@ -160,9 +160,6 @@ func applySepoliaFeatureFlags(ctx *cli.Context) {
|
||||
if err := ctx.Set(enableForkChoiceDoublyLinkedTree.Names()[0], "true"); err != nil {
|
||||
log.WithError(err).Debug("error enabling doubly linked tree forkchoice flag")
|
||||
}
|
||||
if err := ctx.Set(enableNativeState.Names()[0], "true"); err != nil {
|
||||
log.WithError(err).Debug("error enabling native state flag")
|
||||
}
|
||||
}
|
||||
|
||||
// ConfigureBeaconChain sets the global config based
|
||||
@@ -209,10 +206,10 @@ func ConfigureBeaconChain(ctx *cli.Context) error {
|
||||
log.WithField(enableHistoricalSpaceRepresentation.Name, enableHistoricalSpaceRepresentation.Usage).Warn(enabledFeatureFlag)
|
||||
cfg.EnableHistoricalSpaceRepresentation = true
|
||||
}
|
||||
cfg.EnableNativeState = false
|
||||
if ctx.Bool(enableNativeState.Name) {
|
||||
logEnabled(enableNativeState)
|
||||
cfg.EnableNativeState = true
|
||||
cfg.EnableNativeState = true
|
||||
if ctx.Bool(disableNativeState.Name) {
|
||||
logDisabled(disableNativeState)
|
||||
cfg.EnableNativeState = false
|
||||
}
|
||||
if ctx.Bool(enableVecHTR.Name) {
|
||||
logEnabled(enableVecHTR)
|
||||
|
||||
@@ -115,6 +115,11 @@ var (
|
||||
Usage: deprecatedUsage,
|
||||
Hidden: true,
|
||||
}
|
||||
deprecatedEnableNativeState = &cli.BoolFlag{
|
||||
Name: "enable-native-state",
|
||||
Usage: deprecatedUsage,
|
||||
Hidden: true,
|
||||
}
|
||||
)
|
||||
|
||||
var deprecatedFlags = []cli.Flag{
|
||||
@@ -138,4 +143,5 @@ var deprecatedFlags = []cli.Flag{
|
||||
deprecatedDisableBatchGossipVerification,
|
||||
deprecatedDisableCorrectlyInsertOrphanedAtts,
|
||||
deprecatedDisableCorrectlyPruneCanonicalAtts,
|
||||
deprecatedEnableNativeState,
|
||||
}
|
||||
|
||||
@@ -101,9 +101,9 @@ var (
|
||||
" (Warning): Once enabled, this feature migrates your database in to a new schema and " +
|
||||
"there is no going back. At worst, your entire database might get corrupted.",
|
||||
}
|
||||
enableNativeState = &cli.BoolFlag{
|
||||
Name: "enable-native-state",
|
||||
Usage: "Enables representing the beacon state as a pure Go struct.",
|
||||
disableNativeState = &cli.BoolFlag{
|
||||
Name: "disable-native-state",
|
||||
Usage: "Disables representing the beacon state as a pure Go struct.",
|
||||
}
|
||||
enableVecHTR = &cli.BoolFlag{
|
||||
Name: "enable-vectorized-htr",
|
||||
@@ -162,7 +162,7 @@ var BeaconChainFlags = append(deprecatedFlags, []cli.Flag{
|
||||
disableBroadcastSlashingFlag,
|
||||
enableSlasherFlag,
|
||||
enableHistoricalSpaceRepresentation,
|
||||
enableNativeState,
|
||||
disableNativeState,
|
||||
enableVecHTR,
|
||||
enableForkChoiceDoublyLinkedTree,
|
||||
enableGossipBatchAggregation,
|
||||
|
||||
@@ -7,6 +7,7 @@ go_library(
|
||||
visibility = ["//visibility:public"],
|
||||
deps = [
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -3,6 +3,7 @@ package validator_service_config

import (
	"github.com/ethereum/go-ethereum/common"
	field_params "github.com/prysmaticlabs/prysm/config/fieldparams"
	"github.com/prysmaticlabs/prysm/config/params"
)

// ProposerSettingsPayload is the struct representation of the JSON or YAML payload set in the validator through the CLI.
@@ -33,3 +34,11 @@ type ProposerOption struct {
	FeeRecipient common.Address
	GasLimit uint64
}

// DefaultProposerOption returns a Proposer Option with defaults filled
func DefaultProposerOption() ProposerOption {
	return ProposerOption{
		FeeRecipient: params.BeaconConfig().DefaultFeeRecipient,
		GasLimit: params.BeaconConfig().DefaultBuilderGasLimit,
	}
}

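DefaultProposerOption above supplies the fallback fee recipient and builder gas limit for validator keys that have no explicit entry. A rough sketch of that lookup-with-fallback shape, using local stand-in types rather than Prysm's params package (the keys, gas limits, and the optionFor helper are hypothetical):

package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
)

// proposerOption mirrors the ProposerOption shown above, for illustration only.
type proposerOption struct {
	FeeRecipient common.Address
	GasLimit     uint64
}

// defaultProposerOption stands in for params.BeaconConfig().DefaultFeeRecipient
// and DefaultBuilderGasLimit; the values here are placeholders.
func defaultProposerOption() proposerOption {
	return proposerOption{
		FeeRecipient: common.Address{}, // zero address as a placeholder default
		GasLimit:     30_000_000,
	}
}

// optionFor returns the per-validator override when configured, else the default.
func optionFor(overrides map[string]proposerOption, pubkey string) proposerOption {
	if opt, ok := overrides[pubkey]; ok {
		return opt
	}
	return defaultProposerOption()
}

func main() {
	overrides := map[string]proposerOption{
		"validator-1": {
			FeeRecipient: common.HexToAddress("0x046FB65722E7b2455012bfebF6177F4d2e9738D9"),
			GasLimit:     25_000_000,
		},
	}
	fmt.Printf("%+v\n", optionFor(overrides, "validator-1"))
	fmt.Printf("%+v\n", optionFor(overrides, "validator-2"))
}
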
@@ -120,6 +120,118 @@
}
},
"response": []
},
{
"name": "get fee recipient",
"request": {
"auth": {
"type": "bearer",
"bearer": [
{
"key": "token",
"value": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.e30.ck3J6tcvHcI74IiFjyJqcBH-MmNAq-fMr0ncyZkGvFM",
"type": "string"
}
]
},
"method": "GET",
"header": [
{
"key": "Content-Type",
"value": "application/json",
"type": "default"
}
],
"url": {
"raw": "localhost:7500/eth/v1/validator/0xaf2e7ba294e03438ea819bd4033c6c1bf6b04320ee2075b77273c08d02f8a61bcc303c2c06bd3713cb442072ae591494/feerecipient",
"host": [
"localhost"
],
"port": "7500",
"path": [
"eth",
"v1",
"validator",
"0xaf2e7ba294e03438ea819bd4033c6c1bf6b04320ee2075b77273c08d02f8a61bcc303c2c06bd3713cb442072ae591494",
"feerecipient"
]
}
},
"response": []
},
{
"name": "set fee recipient",
"request": {
"auth": {
"type": "bearer",
"bearer": [
{
"key": "token",
"value": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.e30.ck3J6tcvHcI74IiFjyJqcBH-MmNAq-fMr0ncyZkGvFM",
"type": "string"
}
]
},
"method": "POST",
"header": [],
"body": {
"mode": "raw",
"raw": "{\n \"ethaddress\":\"0x046FB65722E7b2455012bfebF6177F4d2e9738D9\"\n}\n"
},
"url": {
"raw": "localhost:7500/eth/v1/validator/0xaf2e7ba294e03438ea819bd4033c6c1bf6b04320ee2075b77273c08d02f8a61bcc303c2c06bd3713cb442072ae591494/feerecipient",
"host": [
"localhost"
],
"port": "7500",
"path": [
"eth",
"v1",
"validator",
"0xaf2e7ba294e03438ea819bd4033c6c1bf6b04320ee2075b77273c08d02f8a61bcc303c2c06bd3713cb442072ae591494",
"feerecipient"
]
}
},
"response": []
},
{
"name": "delete fee recipient",
"request": {
"auth": {
"type": "bearer",
"bearer": [
{
"key": "token",
"value": "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.e30.ck3J6tcvHcI74IiFjyJqcBH-MmNAq-fMr0ncyZkGvFM",
"type": "string"
}
]
},
"method": "DELETE",
"header": [
{
"key": "Content-Type",
"value": "application/json",
"type": "default"
}
],
"url": {
"raw": "localhost:7500/eth/v1/validator/0xaf2e7ba294e03438ea819bd4033c6c1bf6b04320ee2075b77273c08d02f8a61bcc303c2c06bd3713cb442072ae591494/feerecipient",
"host": [
"localhost"
],
"port": "7500",
"path": [
"eth",
"v1",
"validator",
"0xaf2e7ba294e03438ea819bd4033c6c1bf6b04320ee2075b77273c08d02f8a61bcc303c2c06bd3713cb442072ae591494",
"feerecipient"
]
}
},
"response": []
}
]
}

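The three Postman entries above exercise the new keymanager fee-recipient endpoints (GET, POST and DELETE on /eth/v1/validator/{pubkey}/feerecipient). An equivalent GET in plain Go, assuming a validator keymanager API listening on localhost:7500 and a bearer token you supply yourself:

package main

import (
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	// Mirrors the "get fee recipient" request above; the port, pubkey and the
	// bearer token are whatever your local validator keymanager API uses.
	url := "http://localhost:7500/eth/v1/validator/" +
		"0xaf2e7ba294e03438ea819bd4033c6c1bf6b04320ee2075b77273c08d02f8a61bcc303c2c06bd3713cb442072ae591494" +
		"/feerecipient"
	req, err := http.NewRequest(http.MethodGet, url, nil)
	if err != nil {
		log.Fatal(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Authorization", "Bearer "+"<your-keymanager-token>")

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()
	body, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Status, string(body))
}
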
654
proto/eth/service/key_management.pb.go
generated
@@ -944,6 +944,155 @@ func (x *DeletedRemoteKeysStatus) GetMessage() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
type PubkeyRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Pubkey []byte `protobuf:"bytes,1,opt,name=pubkey,proto3" json:"pubkey,omitempty"`
|
||||
}
|
||||
|
||||
func (x *PubkeyRequest) Reset() {
|
||||
*x = PubkeyRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_proto_eth_service_key_management_proto_msgTypes[14]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *PubkeyRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*PubkeyRequest) ProtoMessage() {}
|
||||
|
||||
func (x *PubkeyRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_proto_eth_service_key_management_proto_msgTypes[14]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use PubkeyRequest.ProtoReflect.Descriptor instead.
|
||||
func (*PubkeyRequest) Descriptor() ([]byte, []int) {
|
||||
return file_proto_eth_service_key_management_proto_rawDescGZIP(), []int{14}
|
||||
}
|
||||
|
||||
func (x *PubkeyRequest) GetPubkey() []byte {
|
||||
if x != nil {
|
||||
return x.Pubkey
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type GetFeeRecipientByPubkeyResponse struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Data *GetFeeRecipientByPubkeyResponse_FeeRecipient `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"`
|
||||
}
|
||||
|
||||
func (x *GetFeeRecipientByPubkeyResponse) Reset() {
|
||||
*x = GetFeeRecipientByPubkeyResponse{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_proto_eth_service_key_management_proto_msgTypes[15]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *GetFeeRecipientByPubkeyResponse) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*GetFeeRecipientByPubkeyResponse) ProtoMessage() {}
|
||||
|
||||
func (x *GetFeeRecipientByPubkeyResponse) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_proto_eth_service_key_management_proto_msgTypes[15]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use GetFeeRecipientByPubkeyResponse.ProtoReflect.Descriptor instead.
|
||||
func (*GetFeeRecipientByPubkeyResponse) Descriptor() ([]byte, []int) {
|
||||
return file_proto_eth_service_key_management_proto_rawDescGZIP(), []int{15}
|
||||
}
|
||||
|
||||
func (x *GetFeeRecipientByPubkeyResponse) GetData() *GetFeeRecipientByPubkeyResponse_FeeRecipient {
|
||||
if x != nil {
|
||||
return x.Data
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type SetFeeRecipientByPubkeyRequest struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Pubkey []byte `protobuf:"bytes,1,opt,name=pubkey,proto3" json:"pubkey,omitempty"`
|
||||
Ethaddress []byte `protobuf:"bytes,2,opt,name=ethaddress,proto3" json:"ethaddress,omitempty"`
|
||||
}
|
||||
|
||||
func (x *SetFeeRecipientByPubkeyRequest) Reset() {
|
||||
*x = SetFeeRecipientByPubkeyRequest{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_proto_eth_service_key_management_proto_msgTypes[16]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *SetFeeRecipientByPubkeyRequest) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*SetFeeRecipientByPubkeyRequest) ProtoMessage() {}
|
||||
|
||||
func (x *SetFeeRecipientByPubkeyRequest) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_proto_eth_service_key_management_proto_msgTypes[16]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use SetFeeRecipientByPubkeyRequest.ProtoReflect.Descriptor instead.
|
||||
func (*SetFeeRecipientByPubkeyRequest) Descriptor() ([]byte, []int) {
|
||||
return file_proto_eth_service_key_management_proto_rawDescGZIP(), []int{16}
|
||||
}
|
||||
|
||||
func (x *SetFeeRecipientByPubkeyRequest) GetPubkey() []byte {
|
||||
if x != nil {
|
||||
return x.Pubkey
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *SetFeeRecipientByPubkeyRequest) GetEthaddress() []byte {
|
||||
if x != nil {
|
||||
return x.Ethaddress
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
type ListKeystoresResponse_Keystore struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
@@ -956,7 +1105,7 @@ type ListKeystoresResponse_Keystore struct {
|
||||
func (x *ListKeystoresResponse_Keystore) Reset() {
|
||||
*x = ListKeystoresResponse_Keystore{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_proto_eth_service_key_management_proto_msgTypes[14]
|
||||
mi := &file_proto_eth_service_key_management_proto_msgTypes[17]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
@@ -969,7 +1118,7 @@ func (x *ListKeystoresResponse_Keystore) String() string {
|
||||
func (*ListKeystoresResponse_Keystore) ProtoMessage() {}
|
||||
|
||||
func (x *ListKeystoresResponse_Keystore) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_proto_eth_service_key_management_proto_msgTypes[14]
|
||||
mi := &file_proto_eth_service_key_management_proto_msgTypes[17]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
@@ -1012,7 +1161,7 @@ type ListRemoteKeysResponse_Keystore struct {
|
||||
func (x *ListRemoteKeysResponse_Keystore) Reset() {
|
||||
*x = ListRemoteKeysResponse_Keystore{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_proto_eth_service_key_management_proto_msgTypes[15]
|
||||
mi := &file_proto_eth_service_key_management_proto_msgTypes[18]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
@@ -1025,7 +1174,7 @@ func (x *ListRemoteKeysResponse_Keystore) String() string {
|
||||
func (*ListRemoteKeysResponse_Keystore) ProtoMessage() {}
|
||||
|
||||
func (x *ListRemoteKeysResponse_Keystore) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_proto_eth_service_key_management_proto_msgTypes[15]
|
||||
mi := &file_proto_eth_service_key_management_proto_msgTypes[18]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
@@ -1074,7 +1223,7 @@ type ImportRemoteKeysRequest_Keystore struct {
|
||||
func (x *ImportRemoteKeysRequest_Keystore) Reset() {
|
||||
*x = ImportRemoteKeysRequest_Keystore{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_proto_eth_service_key_management_proto_msgTypes[16]
|
||||
mi := &file_proto_eth_service_key_management_proto_msgTypes[19]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
@@ -1087,7 +1236,7 @@ func (x *ImportRemoteKeysRequest_Keystore) String() string {
|
||||
func (*ImportRemoteKeysRequest_Keystore) ProtoMessage() {}
|
||||
|
||||
func (x *ImportRemoteKeysRequest_Keystore) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_proto_eth_service_key_management_proto_msgTypes[16]
|
||||
mi := &file_proto_eth_service_key_management_proto_msgTypes[19]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
@@ -1117,6 +1266,61 @@ func (x *ImportRemoteKeysRequest_Keystore) GetUrl() string {
|
||||
return ""
|
||||
}
|
||||
|
||||
type GetFeeRecipientByPubkeyResponse_FeeRecipient struct {
|
||||
state protoimpl.MessageState
|
||||
sizeCache protoimpl.SizeCache
|
||||
unknownFields protoimpl.UnknownFields
|
||||
|
||||
Pubkey []byte `protobuf:"bytes,1,opt,name=pubkey,proto3" json:"pubkey,omitempty"`
|
||||
Ethaddress []byte `protobuf:"bytes,2,opt,name=ethaddress,proto3" json:"ethaddress,omitempty"`
|
||||
}
|
||||
|
||||
func (x *GetFeeRecipientByPubkeyResponse_FeeRecipient) Reset() {
|
||||
*x = GetFeeRecipientByPubkeyResponse_FeeRecipient{}
|
||||
if protoimpl.UnsafeEnabled {
|
||||
mi := &file_proto_eth_service_key_management_proto_msgTypes[20]
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
}
|
||||
|
||||
func (x *GetFeeRecipientByPubkeyResponse_FeeRecipient) String() string {
|
||||
return protoimpl.X.MessageStringOf(x)
|
||||
}
|
||||
|
||||
func (*GetFeeRecipientByPubkeyResponse_FeeRecipient) ProtoMessage() {}
|
||||
|
||||
func (x *GetFeeRecipientByPubkeyResponse_FeeRecipient) ProtoReflect() protoreflect.Message {
|
||||
mi := &file_proto_eth_service_key_management_proto_msgTypes[20]
|
||||
if protoimpl.UnsafeEnabled && x != nil {
|
||||
ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x))
|
||||
if ms.LoadMessageInfo() == nil {
|
||||
ms.StoreMessageInfo(mi)
|
||||
}
|
||||
return ms
|
||||
}
|
||||
return mi.MessageOf(x)
|
||||
}
|
||||
|
||||
// Deprecated: Use GetFeeRecipientByPubkeyResponse_FeeRecipient.ProtoReflect.Descriptor instead.
|
||||
func (*GetFeeRecipientByPubkeyResponse_FeeRecipient) Descriptor() ([]byte, []int) {
|
||||
return file_proto_eth_service_key_management_proto_rawDescGZIP(), []int{15, 0}
|
||||
}
|
||||
|
||||
func (x *GetFeeRecipientByPubkeyResponse_FeeRecipient) GetPubkey() []byte {
|
||||
if x != nil {
|
||||
return x.Pubkey
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (x *GetFeeRecipientByPubkeyResponse_FeeRecipient) GetEthaddress() []byte {
|
||||
if x != nil {
|
||||
return x.Ethaddress
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
var File_proto_eth_service_key_management_proto protoreflect.FileDescriptor
|
||||
|
||||
var file_proto_eth_service_key_management_proto_rawDesc = []byte{
|
||||
@@ -1252,73 +1456,124 @@ var file_proto_eth_service_key_management_proto_rawDesc = []byte{
|
||||
0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x22, 0x2f, 0x0a,
|
||||
0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x0d, 0x0a, 0x09, 0x4e, 0x4f, 0x54, 0x5f, 0x46,
|
||||
0x4f, 0x55, 0x4e, 0x44, 0x10, 0x00, 0x12, 0x0b, 0x0a, 0x07, 0x44, 0x45, 0x4c, 0x45, 0x54, 0x45,
|
||||
0x44, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x03, 0x32, 0xee,
|
||||
0x06, 0x0a, 0x0d, 0x4b, 0x65, 0x79, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74,
|
||||
0x12, 0x78, 0x0a, 0x0d, 0x4c, 0x69, 0x73, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x74, 0x6f, 0x72, 0x65,
|
||||
0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x2b, 0x2e, 0x65, 0x74, 0x68, 0x65,
|
||||
0x44, 0x10, 0x01, 0x12, 0x09, 0x0a, 0x05, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x03, 0x22, 0x27,
|
||||
0x0a, 0x0d, 0x50, 0x75, 0x62, 0x6b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12,
|
||||
0x16, 0x0a, 0x06, 0x70, 0x75, 0x62, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52,
|
||||
0x06, 0x70, 0x75, 0x62, 0x6b, 0x65, 0x79, 0x22, 0xc1, 0x01, 0x0a, 0x1f, 0x47, 0x65, 0x74, 0x46,
|
||||
0x65, 0x65, 0x52, 0x65, 0x63, 0x69, 0x70, 0x69, 0x65, 0x6e, 0x74, 0x42, 0x79, 0x50, 0x75, 0x62,
|
||||
0x6b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x56, 0x0a, 0x04, 0x64,
|
||||
0x61, 0x74, 0x61, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x42, 0x2e, 0x65, 0x74, 0x68, 0x65,
|
||||
0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65,
|
||||
0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x73, 0x52, 0x65,
|
||||
0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x22, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1c, 0x12, 0x1a,
|
||||
0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x65, 0x74, 0x68, 0x2f, 0x76, 0x31,
|
||||
0x2f, 0x6b, 0x65, 0x79, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x73, 0x12, 0x95, 0x01, 0x0a, 0x0f, 0x49,
|
||||
0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x73, 0x12, 0x2c,
|
||||
0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x73, 0x65,
|
||||
0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4b, 0x65, 0x79, 0x73,
|
||||
0x74, 0x6f, 0x72, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x65,
|
||||
0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x73, 0x65, 0x72, 0x76,
|
||||
0x69, 0x63, 0x65, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x74, 0x6f,
|
||||
0x72, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x25, 0x82, 0xd3, 0xe4,
|
||||
0x93, 0x02, 0x1f, 0x22, 0x1a, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x65,
|
||||
0x74, 0x68, 0x2f, 0x76, 0x31, 0x2f, 0x6b, 0x65, 0x79, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x73, 0x3a,
|
||||
0x01, 0x2a, 0x12, 0x95, 0x01, 0x0a, 0x0f, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79,
|
||||
0x73, 0x74, 0x6f, 0x72, 0x65, 0x73, 0x12, 0x2c, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75,
|
||||
0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x44, 0x65,
|
||||
0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x73, 0x52, 0x65, 0x71,
|
||||
0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e,
|
||||
0x65, 0x74, 0x68, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x44, 0x65, 0x6c, 0x65,
|
||||
0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f,
|
||||
0x6e, 0x73, 0x65, 0x22, 0x25, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1f, 0x2a, 0x1a, 0x2f, 0x69, 0x6e,
|
||||
0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x65, 0x74, 0x68, 0x2f, 0x76, 0x31, 0x2f, 0x6b, 0x65,
|
||||
0x79, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x73, 0x3a, 0x01, 0x2a, 0x12, 0x7b, 0x0a, 0x0e, 0x4c, 0x69,
|
||||
0x73, 0x74, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x16, 0x2e, 0x67,
|
||||
0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45,
|
||||
0x6d, 0x70, 0x74, 0x79, 0x1a, 0x2c, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e,
|
||||
0x65, 0x74, 0x68, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74,
|
||||
0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
|
||||
0x73, 0x65, 0x22, 0x23, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1d, 0x12, 0x1b, 0x2f, 0x69, 0x6e, 0x74,
|
||||
0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x65, 0x74, 0x68, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x6d,
|
||||
0x6f, 0x74, 0x65, 0x6b, 0x65, 0x79, 0x73, 0x12, 0x99, 0x01, 0x0a, 0x10, 0x49, 0x6d, 0x70, 0x6f,
|
||||
0x72, 0x74, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x2d, 0x2e, 0x65,
|
||||
0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x73, 0x65, 0x72, 0x76,
|
||||
0x69, 0x63, 0x65, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65,
|
||||
0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x65, 0x74,
|
||||
0x2e, 0x47, 0x65, 0x74, 0x46, 0x65, 0x65, 0x52, 0x65, 0x63, 0x69, 0x70, 0x69, 0x65, 0x6e, 0x74,
|
||||
0x42, 0x79, 0x50, 0x75, 0x62, 0x6b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65,
|
||||
0x2e, 0x46, 0x65, 0x65, 0x52, 0x65, 0x63, 0x69, 0x70, 0x69, 0x65, 0x6e, 0x74, 0x52, 0x04, 0x64,
|
||||
0x61, 0x74, 0x61, 0x1a, 0x46, 0x0a, 0x0c, 0x46, 0x65, 0x65, 0x52, 0x65, 0x63, 0x69, 0x70, 0x69,
|
||||
0x65, 0x6e, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x70, 0x75, 0x62, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20,
|
||||
0x01, 0x28, 0x0c, 0x52, 0x06, 0x70, 0x75, 0x62, 0x6b, 0x65, 0x79, 0x12, 0x1e, 0x0a, 0x0a, 0x65,
|
||||
0x74, 0x68, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52,
|
||||
0x0a, 0x65, 0x74, 0x68, 0x61, 0x64, 0x64, 0x72, 0x65, 0x73, 0x73, 0x22, 0x58, 0x0a, 0x1e, 0x53,
|
||||
0x65, 0x74, 0x46, 0x65, 0x65, 0x52, 0x65, 0x63, 0x69, 0x70, 0x69, 0x65, 0x6e, 0x74, 0x42, 0x79,
|
||||
0x50, 0x75, 0x62, 0x6b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a,
|
||||
0x06, 0x70, 0x75, 0x62, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x70,
|
||||
0x75, 0x62, 0x6b, 0x65, 0x79, 0x12, 0x1e, 0x0a, 0x0a, 0x65, 0x74, 0x68, 0x61, 0x64, 0x64, 0x72,
|
||||
0x65, 0x73, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x65, 0x74, 0x68, 0x61, 0x64,
|
||||
0x64, 0x72, 0x65, 0x73, 0x73, 0x32, 0xe1, 0x0a, 0x0a, 0x0d, 0x4b, 0x65, 0x79, 0x4d, 0x61, 0x6e,
|
||||
0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x12, 0x78, 0x0a, 0x0d, 0x4c, 0x69, 0x73, 0x74, 0x4b,
|
||||
0x65, 0x79, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c,
|
||||
0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79,
|
||||
0x1a, 0x2b, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e,
|
||||
0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x4b, 0x65, 0x79, 0x73,
|
||||
0x74, 0x6f, 0x72, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x22, 0x82,
|
||||
0xd3, 0xe4, 0x93, 0x02, 0x1c, 0x12, 0x1a, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c,
|
||||
0x2f, 0x65, 0x74, 0x68, 0x2f, 0x76, 0x31, 0x2f, 0x6b, 0x65, 0x79, 0x73, 0x74, 0x6f, 0x72, 0x65,
|
||||
0x73, 0x12, 0x95, 0x01, 0x0a, 0x0f, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x4b, 0x65, 0x79, 0x73,
|
||||
0x74, 0x6f, 0x72, 0x65, 0x73, 0x12, 0x2c, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d,
|
||||
0x2e, 0x65, 0x74, 0x68, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x49, 0x6d, 0x70,
|
||||
0x6f, 0x72, 0x74, 0x4b, 0x65, 0x79, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75,
|
||||
0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65,
|
||||
0x74, 0x68, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72,
|
||||
0x74, 0x4b, 0x65, 0x79, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
|
||||
0x73, 0x65, 0x22, 0x25, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x1f, 0x22, 0x1a, 0x2f, 0x69, 0x6e, 0x74,
|
||||
0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x65, 0x74, 0x68, 0x2f, 0x76, 0x31, 0x2f, 0x6b, 0x65, 0x79,
|
||||
0x73, 0x74, 0x6f, 0x72, 0x65, 0x73, 0x3a, 0x01, 0x2a, 0x12, 0x95, 0x01, 0x0a, 0x0f, 0x44, 0x65,
|
||||
0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x73, 0x12, 0x2c, 0x2e,
|
||||
0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x73, 0x65, 0x72,
|
||||
0x76, 0x69, 0x63, 0x65, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x74,
|
||||
0x6f, 0x72, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2d, 0x2e, 0x65, 0x74,
|
||||
0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69,
|
||||
0x63, 0x65, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x4b,
|
||||
0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0x82, 0xd3, 0xe4,
|
||||
0x93, 0x02, 0x20, 0x22, 0x1b, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x65,
|
||||
0x74, 0x68, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x6b, 0x65, 0x79, 0x73,
|
||||
0x3a, 0x01, 0x2a, 0x12, 0x99, 0x01, 0x0a, 0x10, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65,
|
||||
0x6d, 0x6f, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x12, 0x2d, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72,
|
||||
0x63, 0x65, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x74, 0x6f, 0x72,
|
||||
0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x25, 0x82, 0xd3, 0xe4, 0x93,
|
||||
0x02, 0x1f, 0x2a, 0x1a, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x65, 0x74,
|
||||
0x68, 0x2f, 0x76, 0x31, 0x2f, 0x6b, 0x65, 0x79, 0x73, 0x74, 0x6f, 0x72, 0x65, 0x73, 0x3a, 0x01,
|
||||
0x2a, 0x12, 0x7b, 0x0a, 0x0e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x4b,
|
||||
0x65, 0x79, 0x73, 0x12, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f,
|
||||
0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x1a, 0x2c, 0x2e, 0x65, 0x74,
|
||||
0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69,
|
||||
0x63, 0x65, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x4b, 0x65, 0x79,
|
||||
0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x23, 0x82, 0xd3, 0xe4, 0x93, 0x02,
|
||||
0x1d, 0x12, 0x1b, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x65, 0x74, 0x68,
|
||||
0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x6b, 0x65, 0x79, 0x73, 0x12, 0x99,
|
||||
0x01, 0x0a, 0x10, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x4b,
|
||||
0x65, 0x79, 0x73, 0x12, 0x2d, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65,
|
||||
0x74, 0x68, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72,
|
||||
0x74, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65,
|
||||
0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74,
|
||||
0x68, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x49, 0x6d, 0x70, 0x6f, 0x72, 0x74,
|
||||
0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e,
|
||||
0x73, 0x65, 0x22, 0x26, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x20, 0x22, 0x1b, 0x2f, 0x69, 0x6e, 0x74,
|
||||
0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x65, 0x74, 0x68, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x6d,
|
||||
0x6f, 0x74, 0x65, 0x6b, 0x65, 0x79, 0x73, 0x3a, 0x01, 0x2a, 0x12, 0x99, 0x01, 0x0a, 0x10, 0x44,
|
||||
0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x12,
|
||||
0x2d, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x73,
|
||||
0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x6d,
|
||||
0x6f, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e,
|
||||
0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x73, 0x65,
|
||||
0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x6d, 0x6f,
|
||||
0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26,
|
||||
0x82, 0xd3, 0xe4, 0x93, 0x02, 0x20, 0x2a, 0x1b, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61,
|
||||
0x6c, 0x2f, 0x65, 0x74, 0x68, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x6b,
|
||||
0x65, 0x79, 0x73, 0x3a, 0x01, 0x2a, 0x12, 0xb0, 0x01, 0x0a, 0x18, 0x4c, 0x69, 0x73, 0x74, 0x46,
|
||||
0x65, 0x65, 0x52, 0x65, 0x63, 0x69, 0x70, 0x69, 0x65, 0x6e, 0x74, 0x42, 0x79, 0x50, 0x75, 0x62,
|
||||
0x6b, 0x65, 0x79, 0x12, 0x23, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65,
|
||||
0x74, 0x68, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x50, 0x75, 0x62, 0x6b, 0x65,
|
||||
0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x35, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72,
|
||||
0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e,
|
||||
0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73,
|
||||
0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2e, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65,
|
||||
0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x44,
|
||||
0x65, 0x6c, 0x65, 0x74, 0x65, 0x52, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x4b, 0x65, 0x79, 0x73, 0x52,
|
||||
0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x26, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x20, 0x2a,
|
||||
0x1b, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x65, 0x74, 0x68, 0x2f, 0x76,
|
||||
0x31, 0x2f, 0x72, 0x65, 0x6d, 0x6f, 0x74, 0x65, 0x6b, 0x65, 0x79, 0x73, 0x3a, 0x01, 0x2a, 0x42,
|
||||
0x97, 0x01, 0x0a, 0x18, 0x6f, 0x72, 0x67, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d,
|
||||
0x2e, 0x65, 0x74, 0x68, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x42, 0x19, 0x4b, 0x65,
|
||||
0x79, 0x4d, 0x61, 0x6e, 0x61, 0x67, 0x65, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69,
|
||||
0x63, 0x65, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75,
|
||||
0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x6c,
|
||||
0x61, 0x62, 0x73, 0x2f, 0x70, 0x72, 0x79, 0x73, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f,
|
||||
0x65, 0x74, 0x68, 0x2f, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0xaa, 0x02, 0x14, 0x45, 0x74,
|
||||
0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x45, 0x74, 0x68, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69,
|
||||
0x63, 0x65, 0xca, 0x02, 0x14, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x5c, 0x45, 0x74,
|
||||
0x68, 0x5c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f,
|
||||
0x33,
|
||||
0x47, 0x65, 0x74, 0x46, 0x65, 0x65, 0x52, 0x65, 0x63, 0x69, 0x70, 0x69, 0x65, 0x6e, 0x74, 0x42,
|
||||
0x79, 0x50, 0x75, 0x62, 0x6b, 0x65, 0x79, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22,
|
||||
0x38, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x32, 0x12, 0x30, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e,
|
||||
0x61, 0x6c, 0x2f, 0x65, 0x74, 0x68, 0x2f, 0x76, 0x31, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61,
|
||||
0x74, 0x6f, 0x72, 0x2f, 0x7b, 0x70, 0x75, 0x62, 0x6b, 0x65, 0x79, 0x7d, 0x2f, 0x66, 0x65, 0x65,
|
||||
0x72, 0x65, 0x63, 0x69, 0x70, 0x69, 0x65, 0x6e, 0x74, 0x12, 0xa4, 0x01, 0x0a, 0x17, 0x53, 0x65,
|
||||
0x74, 0x46, 0x65, 0x65, 0x52, 0x65, 0x63, 0x69, 0x70, 0x69, 0x65, 0x6e, 0x74, 0x42, 0x79, 0x50,
|
||||
0x75, 0x62, 0x6b, 0x65, 0x79, 0x12, 0x34, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d,
|
||||
0x2e, 0x65, 0x74, 0x68, 0x2e, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x53, 0x65, 0x74,
|
||||
0x46, 0x65, 0x65, 0x52, 0x65, 0x63, 0x69, 0x70, 0x69, 0x65, 0x6e, 0x74, 0x42, 0x79, 0x50, 0x75,
|
||||
0x62, 0x6b, 0x65, 0x79, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f,
|
||||
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d,
|
||||
0x70, 0x74, 0x79, 0x22, 0x3b, 0x82, 0xd3, 0xe4, 0x93, 0x02, 0x35, 0x22, 0x30, 0x2f, 0x69, 0x6e,
|
||||
0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f, 0x65, 0x74, 0x68, 0x2f, 0x76, 0x31, 0x2f, 0x76, 0x61,
|
||||
0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72, 0x2f, 0x7b, 0x70, 0x75, 0x62, 0x6b, 0x65, 0x79, 0x7d,
|
||||
0x2f, 0x66, 0x65, 0x65, 0x72, 0x65, 0x63, 0x69, 0x70, 0x69, 0x65, 0x6e, 0x74, 0x3a, 0x01, 0x2a,
|
||||
0x12, 0x96, 0x01, 0x0a, 0x1a, 0x44, 0x65, 0x6c, 0x65, 0x74, 0x65, 0x46, 0x65, 0x65, 0x52, 0x65,
|
||||
0x63, 0x69, 0x70, 0x69, 0x65, 0x6e, 0x74, 0x42, 0x79, 0x50, 0x75, 0x62, 0x6b, 0x65, 0x79, 0x12,
|
||||
0x23, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x73,
|
||||
0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x50, 0x75, 0x62, 0x6b, 0x65, 0x79, 0x52, 0x65, 0x71,
|
||||
0x75, 0x65, 0x73, 0x74, 0x1a, 0x16, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72,
|
||||
0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x45, 0x6d, 0x70, 0x74, 0x79, 0x22, 0x3b, 0x82, 0xd3,
|
||||
0xe4, 0x93, 0x02, 0x35, 0x2a, 0x30, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x6e, 0x61, 0x6c, 0x2f,
|
||||
0x65, 0x74, 0x68, 0x2f, 0x76, 0x31, 0x2f, 0x76, 0x61, 0x6c, 0x69, 0x64, 0x61, 0x74, 0x6f, 0x72,
|
||||
0x2f, 0x7b, 0x70, 0x75, 0x62, 0x6b, 0x65, 0x79, 0x7d, 0x2f, 0x66, 0x65, 0x65, 0x72, 0x65, 0x63,
|
||||
0x69, 0x70, 0x69, 0x65, 0x6e, 0x74, 0x3a, 0x01, 0x2a, 0x42, 0x97, 0x01, 0x0a, 0x18, 0x6f, 0x72,
|
||||
0x67, 0x2e, 0x65, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x2e, 0x65, 0x74, 0x68, 0x2e, 0x73,
|
||||
0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x42, 0x19, 0x4b, 0x65, 0x79, 0x4d, 0x61, 0x6e, 0x61, 0x67,
|
||||
0x65, 0x6d, 0x65, 0x6e, 0x74, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, 0x74,
|
||||
0x6f, 0x50, 0x01, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f,
|
||||
0x70, 0x72, 0x79, 0x73, 0x6d, 0x61, 0x74, 0x69, 0x63, 0x6c, 0x61, 0x62, 0x73, 0x2f, 0x70, 0x72,
|
||||
0x79, 0x73, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x65, 0x74, 0x68, 0x2f, 0x73, 0x65,
|
||||
0x72, 0x76, 0x69, 0x63, 0x65, 0xaa, 0x02, 0x14, 0x45, 0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d,
|
||||
0x2e, 0x45, 0x74, 0x68, 0x2e, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0xca, 0x02, 0x14, 0x45,
|
||||
0x74, 0x68, 0x65, 0x72, 0x65, 0x75, 0x6d, 0x5c, 0x45, 0x74, 0x68, 0x5c, 0x53, 0x65, 0x72, 0x76,
|
||||
0x69, 0x63, 0x65, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33,
|
||||
}
|
||||
|
||||
var (
|
||||
@@ -1334,60 +1589,71 @@ func file_proto_eth_service_key_management_proto_rawDescGZIP() []byte {
|
||||
}
|
||||
|
||||
var file_proto_eth_service_key_management_proto_enumTypes = make([]protoimpl.EnumInfo, 4)
|
||||
var file_proto_eth_service_key_management_proto_msgTypes = make([]protoimpl.MessageInfo, 17)
|
||||
var file_proto_eth_service_key_management_proto_msgTypes = make([]protoimpl.MessageInfo, 21)
|
||||
var file_proto_eth_service_key_management_proto_goTypes = []interface{}{
|
||||
(ImportedKeystoreStatus_Status)(0), // 0: ethereum.eth.service.ImportedKeystoreStatus.Status
|
||||
(DeletedKeystoreStatus_Status)(0), // 1: ethereum.eth.service.DeletedKeystoreStatus.Status
|
||||
(ImportedRemoteKeysStatus_Status)(0), // 2: ethereum.eth.service.ImportedRemoteKeysStatus.Status
|
||||
(DeletedRemoteKeysStatus_Status)(0), // 3: ethereum.eth.service.DeletedRemoteKeysStatus.Status
|
||||
(*ListKeystoresResponse)(nil), // 4: ethereum.eth.service.ListKeystoresResponse
|
||||
(*ImportKeystoresRequest)(nil), // 5: ethereum.eth.service.ImportKeystoresRequest
|
||||
(*ImportKeystoresResponse)(nil), // 6: ethereum.eth.service.ImportKeystoresResponse
|
||||
(*DeleteKeystoresRequest)(nil), // 7: ethereum.eth.service.DeleteKeystoresRequest
|
||||
(*DeleteKeystoresResponse)(nil), // 8: ethereum.eth.service.DeleteKeystoresResponse
|
||||
(*ImportedKeystoreStatus)(nil), // 9: ethereum.eth.service.ImportedKeystoreStatus
|
||||
(*DeletedKeystoreStatus)(nil), // 10: ethereum.eth.service.DeletedKeystoreStatus
|
||||
(*ListRemoteKeysResponse)(nil), // 11: ethereum.eth.service.ListRemoteKeysResponse
|
||||
(*ImportRemoteKeysRequest)(nil), // 12: ethereum.eth.service.ImportRemoteKeysRequest
|
||||
(*ImportRemoteKeysResponse)(nil), // 13: ethereum.eth.service.ImportRemoteKeysResponse
|
||||
(*DeleteRemoteKeysRequest)(nil), // 14: ethereum.eth.service.DeleteRemoteKeysRequest
|
||||
(*DeleteRemoteKeysResponse)(nil), // 15: ethereum.eth.service.DeleteRemoteKeysResponse
|
||||
(*ImportedRemoteKeysStatus)(nil), // 16: ethereum.eth.service.ImportedRemoteKeysStatus
|
||||
(*DeletedRemoteKeysStatus)(nil), // 17: ethereum.eth.service.DeletedRemoteKeysStatus
|
||||
(*ListKeystoresResponse_Keystore)(nil), // 18: ethereum.eth.service.ListKeystoresResponse.Keystore
|
||||
(*ListRemoteKeysResponse_Keystore)(nil), // 19: ethereum.eth.service.ListRemoteKeysResponse.Keystore
|
||||
(*ImportRemoteKeysRequest_Keystore)(nil), // 20: ethereum.eth.service.ImportRemoteKeysRequest.Keystore
|
||||
(*empty.Empty)(nil), // 21: google.protobuf.Empty
|
||||
(ImportedKeystoreStatus_Status)(0), // 0: ethereum.eth.service.ImportedKeystoreStatus.Status
|
||||
(DeletedKeystoreStatus_Status)(0), // 1: ethereum.eth.service.DeletedKeystoreStatus.Status
|
||||
(ImportedRemoteKeysStatus_Status)(0), // 2: ethereum.eth.service.ImportedRemoteKeysStatus.Status
|
||||
(DeletedRemoteKeysStatus_Status)(0), // 3: ethereum.eth.service.DeletedRemoteKeysStatus.Status
|
||||
(*ListKeystoresResponse)(nil), // 4: ethereum.eth.service.ListKeystoresResponse
|
||||
(*ImportKeystoresRequest)(nil), // 5: ethereum.eth.service.ImportKeystoresRequest
|
||||
(*ImportKeystoresResponse)(nil), // 6: ethereum.eth.service.ImportKeystoresResponse
|
||||
(*DeleteKeystoresRequest)(nil), // 7: ethereum.eth.service.DeleteKeystoresRequest
|
||||
(*DeleteKeystoresResponse)(nil), // 8: ethereum.eth.service.DeleteKeystoresResponse
|
||||
(*ImportedKeystoreStatus)(nil), // 9: ethereum.eth.service.ImportedKeystoreStatus
|
||||
(*DeletedKeystoreStatus)(nil), // 10: ethereum.eth.service.DeletedKeystoreStatus
|
||||
(*ListRemoteKeysResponse)(nil), // 11: ethereum.eth.service.ListRemoteKeysResponse
|
||||
(*ImportRemoteKeysRequest)(nil), // 12: ethereum.eth.service.ImportRemoteKeysRequest
|
||||
(*ImportRemoteKeysResponse)(nil), // 13: ethereum.eth.service.ImportRemoteKeysResponse
|
||||
(*DeleteRemoteKeysRequest)(nil), // 14: ethereum.eth.service.DeleteRemoteKeysRequest
|
||||
(*DeleteRemoteKeysResponse)(nil), // 15: ethereum.eth.service.DeleteRemoteKeysResponse
|
||||
(*ImportedRemoteKeysStatus)(nil), // 16: ethereum.eth.service.ImportedRemoteKeysStatus
|
||||
(*DeletedRemoteKeysStatus)(nil), // 17: ethereum.eth.service.DeletedRemoteKeysStatus
|
||||
(*PubkeyRequest)(nil), // 18: ethereum.eth.service.PubkeyRequest
|
||||
(*GetFeeRecipientByPubkeyResponse)(nil), // 19: ethereum.eth.service.GetFeeRecipientByPubkeyResponse
|
||||
(*SetFeeRecipientByPubkeyRequest)(nil), // 20: ethereum.eth.service.SetFeeRecipientByPubkeyRequest
|
||||
(*ListKeystoresResponse_Keystore)(nil), // 21: ethereum.eth.service.ListKeystoresResponse.Keystore
|
||||
(*ListRemoteKeysResponse_Keystore)(nil), // 22: ethereum.eth.service.ListRemoteKeysResponse.Keystore
|
||||
(*ImportRemoteKeysRequest_Keystore)(nil), // 23: ethereum.eth.service.ImportRemoteKeysRequest.Keystore
|
||||
(*GetFeeRecipientByPubkeyResponse_FeeRecipient)(nil), // 24: ethereum.eth.service.GetFeeRecipientByPubkeyResponse.FeeRecipient
|
||||
(*empty.Empty)(nil), // 25: google.protobuf.Empty
|
||||
}
|
||||
var file_proto_eth_service_key_management_proto_depIdxs = []int32{
|
||||
18, // 0: ethereum.eth.service.ListKeystoresResponse.data:type_name -> ethereum.eth.service.ListKeystoresResponse.Keystore
|
||||
21, // 0: ethereum.eth.service.ListKeystoresResponse.data:type_name -> ethereum.eth.service.ListKeystoresResponse.Keystore
|
||||
9, // 1: ethereum.eth.service.ImportKeystoresResponse.data:type_name -> ethereum.eth.service.ImportedKeystoreStatus
|
||||
10, // 2: ethereum.eth.service.DeleteKeystoresResponse.data:type_name -> ethereum.eth.service.DeletedKeystoreStatus
|
||||
0, // 3: ethereum.eth.service.ImportedKeystoreStatus.status:type_name -> ethereum.eth.service.ImportedKeystoreStatus.Status
|
||||
1, // 4: ethereum.eth.service.DeletedKeystoreStatus.status:type_name -> ethereum.eth.service.DeletedKeystoreStatus.Status
|
||||
19, // 5: ethereum.eth.service.ListRemoteKeysResponse.data:type_name -> ethereum.eth.service.ListRemoteKeysResponse.Keystore
|
||||
20, // 6: ethereum.eth.service.ImportRemoteKeysRequest.remote_keys:type_name -> ethereum.eth.service.ImportRemoteKeysRequest.Keystore
|
||||
22, // 5: ethereum.eth.service.ListRemoteKeysResponse.data:type_name -> ethereum.eth.service.ListRemoteKeysResponse.Keystore
|
||||
23, // 6: ethereum.eth.service.ImportRemoteKeysRequest.remote_keys:type_name -> ethereum.eth.service.ImportRemoteKeysRequest.Keystore
|
||||
16, // 7: ethereum.eth.service.ImportRemoteKeysResponse.data:type_name -> ethereum.eth.service.ImportedRemoteKeysStatus
|
||||
17, // 8: ethereum.eth.service.DeleteRemoteKeysResponse.data:type_name -> ethereum.eth.service.DeletedRemoteKeysStatus
|
||||
2, // 9: ethereum.eth.service.ImportedRemoteKeysStatus.status:type_name -> ethereum.eth.service.ImportedRemoteKeysStatus.Status
|
||||
3, // 10: ethereum.eth.service.DeletedRemoteKeysStatus.status:type_name -> ethereum.eth.service.DeletedRemoteKeysStatus.Status
|
||||
21, // 11: ethereum.eth.service.KeyManagement.ListKeystores:input_type -> google.protobuf.Empty
|
||||
5, // 12: ethereum.eth.service.KeyManagement.ImportKeystores:input_type -> ethereum.eth.service.ImportKeystoresRequest
|
||||
7, // 13: ethereum.eth.service.KeyManagement.DeleteKeystores:input_type -> ethereum.eth.service.DeleteKeystoresRequest
|
||||
21, // 14: ethereum.eth.service.KeyManagement.ListRemoteKeys:input_type -> google.protobuf.Empty
|
||||
12, // 15: ethereum.eth.service.KeyManagement.ImportRemoteKeys:input_type -> ethereum.eth.service.ImportRemoteKeysRequest
|
||||
14, // 16: ethereum.eth.service.KeyManagement.DeleteRemoteKeys:input_type -> ethereum.eth.service.DeleteRemoteKeysRequest
|
||||
4, // 17: ethereum.eth.service.KeyManagement.ListKeystores:output_type -> ethereum.eth.service.ListKeystoresResponse
|
||||
6, // 18: ethereum.eth.service.KeyManagement.ImportKeystores:output_type -> ethereum.eth.service.ImportKeystoresResponse
|
||||
8, // 19: ethereum.eth.service.KeyManagement.DeleteKeystores:output_type -> ethereum.eth.service.DeleteKeystoresResponse
|
||||
11, // 20: ethereum.eth.service.KeyManagement.ListRemoteKeys:output_type -> ethereum.eth.service.ListRemoteKeysResponse
|
||||
13, // 21: ethereum.eth.service.KeyManagement.ImportRemoteKeys:output_type -> ethereum.eth.service.ImportRemoteKeysResponse
|
||||
15, // 22: ethereum.eth.service.KeyManagement.DeleteRemoteKeys:output_type -> ethereum.eth.service.DeleteRemoteKeysResponse
|
||||
17, // [17:23] is the sub-list for method output_type
|
||||
11, // [11:17] is the sub-list for method input_type
|
||||
11, // [11:11] is the sub-list for extension type_name
|
||||
11, // [11:11] is the sub-list for extension extendee
|
||||
0, // [0:11] is the sub-list for field type_name
|
||||
24, // 11: ethereum.eth.service.GetFeeRecipientByPubkeyResponse.data:type_name -> ethereum.eth.service.GetFeeRecipientByPubkeyResponse.FeeRecipient
|
||||
25, // 12: ethereum.eth.service.KeyManagement.ListKeystores:input_type -> google.protobuf.Empty
|
||||
5, // 13: ethereum.eth.service.KeyManagement.ImportKeystores:input_type -> ethereum.eth.service.ImportKeystoresRequest
|
||||
7, // 14: ethereum.eth.service.KeyManagement.DeleteKeystores:input_type -> ethereum.eth.service.DeleteKeystoresRequest
|
||||
25, // 15: ethereum.eth.service.KeyManagement.ListRemoteKeys:input_type -> google.protobuf.Empty
|
||||
12, // 16: ethereum.eth.service.KeyManagement.ImportRemoteKeys:input_type -> ethereum.eth.service.ImportRemoteKeysRequest
|
||||
14, // 17: ethereum.eth.service.KeyManagement.DeleteRemoteKeys:input_type -> ethereum.eth.service.DeleteRemoteKeysRequest
|
||||
18, // 18: ethereum.eth.service.KeyManagement.ListFeeRecipientByPubkey:input_type -> ethereum.eth.service.PubkeyRequest
|
||||
20, // 19: ethereum.eth.service.KeyManagement.SetFeeRecipientByPubkey:input_type -> ethereum.eth.service.SetFeeRecipientByPubkeyRequest
|
||||
18, // 20: ethereum.eth.service.KeyManagement.DeleteFeeRecipientByPubkey:input_type -> ethereum.eth.service.PubkeyRequest
|
||||
4, // 21: ethereum.eth.service.KeyManagement.ListKeystores:output_type -> ethereum.eth.service.ListKeystoresResponse
|
||||
6, // 22: ethereum.eth.service.KeyManagement.ImportKeystores:output_type -> ethereum.eth.service.ImportKeystoresResponse
|
||||
8, // 23: ethereum.eth.service.KeyManagement.DeleteKeystores:output_type -> ethereum.eth.service.DeleteKeystoresResponse
|
||||
11, // 24: ethereum.eth.service.KeyManagement.ListRemoteKeys:output_type -> ethereum.eth.service.ListRemoteKeysResponse
|
||||
13, // 25: ethereum.eth.service.KeyManagement.ImportRemoteKeys:output_type -> ethereum.eth.service.ImportRemoteKeysResponse
|
||||
15, // 26: ethereum.eth.service.KeyManagement.DeleteRemoteKeys:output_type -> ethereum.eth.service.DeleteRemoteKeysResponse
|
||||
19, // 27: ethereum.eth.service.KeyManagement.ListFeeRecipientByPubkey:output_type -> ethereum.eth.service.GetFeeRecipientByPubkeyResponse
|
||||
25, // 28: ethereum.eth.service.KeyManagement.SetFeeRecipientByPubkey:output_type -> google.protobuf.Empty
|
||||
25, // 29: ethereum.eth.service.KeyManagement.DeleteFeeRecipientByPubkey:output_type -> google.protobuf.Empty
|
||||
21, // [21:30] is the sub-list for method output_type
|
||||
12, // [12:21] is the sub-list for method input_type
|
||||
12, // [12:12] is the sub-list for extension type_name
|
||||
12, // [12:12] is the sub-list for extension extendee
|
||||
0, // [0:12] is the sub-list for field type_name
|
||||
}
|
||||
|
||||
func init() { file_proto_eth_service_key_management_proto_init() }
|
||||
@@ -1565,7 +1831,7 @@ func file_proto_eth_service_key_management_proto_init() {
|
||||
}
|
||||
}
|
||||
file_proto_eth_service_key_management_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ListKeystoresResponse_Keystore); i {
|
||||
switch v := v.(*PubkeyRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
@@ -1577,7 +1843,7 @@ func file_proto_eth_service_key_management_proto_init() {
|
||||
}
|
||||
}
|
||||
file_proto_eth_service_key_management_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ListRemoteKeysResponse_Keystore); i {
|
||||
switch v := v.(*GetFeeRecipientByPubkeyResponse); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
@@ -1589,6 +1855,42 @@ func file_proto_eth_service_key_management_proto_init() {
|
||||
}
|
||||
}
|
||||
file_proto_eth_service_key_management_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*SetFeeRecipientByPubkeyRequest); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_proto_eth_service_key_management_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ListKeystoresResponse_Keystore); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_proto_eth_service_key_management_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ListRemoteKeysResponse_Keystore); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_proto_eth_service_key_management_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*ImportRemoteKeysRequest_Keystore); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
@@ -1600,6 +1902,18 @@ func file_proto_eth_service_key_management_proto_init() {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file_proto_eth_service_key_management_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} {
|
||||
switch v := v.(*GetFeeRecipientByPubkeyResponse_FeeRecipient); i {
|
||||
case 0:
|
||||
return &v.state
|
||||
case 1:
|
||||
return &v.sizeCache
|
||||
case 2:
|
||||
return &v.unknownFields
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
}
|
||||
type x struct{}
|
||||
out := protoimpl.TypeBuilder{
|
||||
@@ -1607,7 +1921,7 @@ func file_proto_eth_service_key_management_proto_init() {
|
||||
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
|
||||
RawDescriptor: file_proto_eth_service_key_management_proto_rawDesc,
|
||||
NumEnums: 4,
|
||||
NumMessages: 17,
|
||||
NumMessages: 21,
|
||||
NumExtensions: 0,
|
||||
NumServices: 1,
|
||||
},
|
||||
@@ -1640,6 +1954,9 @@ type KeyManagementClient interface {
	ListRemoteKeys(ctx context.Context, in *empty.Empty, opts ...grpc.CallOption) (*ListRemoteKeysResponse, error)
	ImportRemoteKeys(ctx context.Context, in *ImportRemoteKeysRequest, opts ...grpc.CallOption) (*ImportRemoteKeysResponse, error)
	DeleteRemoteKeys(ctx context.Context, in *DeleteRemoteKeysRequest, opts ...grpc.CallOption) (*DeleteRemoteKeysResponse, error)
	ListFeeRecipientByPubkey(ctx context.Context, in *PubkeyRequest, opts ...grpc.CallOption) (*GetFeeRecipientByPubkeyResponse, error)
	SetFeeRecipientByPubkey(ctx context.Context, in *SetFeeRecipientByPubkeyRequest, opts ...grpc.CallOption) (*empty.Empty, error)
	DeleteFeeRecipientByPubkey(ctx context.Context, in *PubkeyRequest, opts ...grpc.CallOption) (*empty.Empty, error)
}

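The three methods appended to KeyManagementClient expose the fee-recipient RPCs over gRPC. A sketch of calling the list RPC directly, assuming the generated NewKeyManagementClient constructor from this package and a validator gRPC endpoint on localhost:7000 (both are assumptions for the sketch, adjust to your setup):

package main

import (
	"context"
	"encoding/hex"
	"fmt"
	"log"
	"strings"
	"time"

	service "github.com/prysmaticlabs/prysm/proto/eth/service"
	"google.golang.org/grpc"
)

func main() {
	// Local, unsecured connection for illustration only.
	conn, err := grpc.Dial("localhost:7000", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()
	client := service.NewKeyManagementClient(conn)

	// The validator pubkey from the Postman collection above, hex-decoded.
	pubkey, err := hex.DecodeString(strings.TrimPrefix(
		"0xaf2e7ba294e03438ea819bd4033c6c1bf6b04320ee2075b77273c08d02f8a61bcc303c2c06bd3713cb442072ae591494", "0x"))
	if err != nil {
		log.Fatal(err)
	}

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	resp, err := client.ListFeeRecipientByPubkey(ctx, &service.PubkeyRequest{Pubkey: pubkey})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("fee recipient: %#x\n", resp.GetData().GetEthaddress())
}
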
type keyManagementClient struct {
|
||||
@@ -1704,6 +2021,33 @@ func (c *keyManagementClient) DeleteRemoteKeys(ctx context.Context, in *DeleteRe
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *keyManagementClient) ListFeeRecipientByPubkey(ctx context.Context, in *PubkeyRequest, opts ...grpc.CallOption) (*GetFeeRecipientByPubkeyResponse, error) {
|
||||
out := new(GetFeeRecipientByPubkeyResponse)
|
||||
err := c.cc.Invoke(ctx, "/ethereum.eth.service.KeyManagement/ListFeeRecipientByPubkey", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *keyManagementClient) SetFeeRecipientByPubkey(ctx context.Context, in *SetFeeRecipientByPubkeyRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
|
||||
out := new(empty.Empty)
|
||||
err := c.cc.Invoke(ctx, "/ethereum.eth.service.KeyManagement/SetFeeRecipientByPubkey", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func (c *keyManagementClient) DeleteFeeRecipientByPubkey(ctx context.Context, in *PubkeyRequest, opts ...grpc.CallOption) (*empty.Empty, error) {
|
||||
out := new(empty.Empty)
|
||||
err := c.cc.Invoke(ctx, "/ethereum.eth.service.KeyManagement/DeleteFeeRecipientByPubkey", in, out, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
// KeyManagementServer is the server API for KeyManagement service.
type KeyManagementServer interface {
	ListKeystores(context.Context, *empty.Empty) (*ListKeystoresResponse, error)
@@ -1712,6 +2056,9 @@ type KeyManagementServer interface {
	ListRemoteKeys(context.Context, *empty.Empty) (*ListRemoteKeysResponse, error)
	ImportRemoteKeys(context.Context, *ImportRemoteKeysRequest) (*ImportRemoteKeysResponse, error)
	DeleteRemoteKeys(context.Context, *DeleteRemoteKeysRequest) (*DeleteRemoteKeysResponse, error)
	ListFeeRecipientByPubkey(context.Context, *PubkeyRequest) (*GetFeeRecipientByPubkeyResponse, error)
	SetFeeRecipientByPubkey(context.Context, *SetFeeRecipientByPubkeyRequest) (*empty.Empty, error)
	DeleteFeeRecipientByPubkey(context.Context, *PubkeyRequest) (*empty.Empty, error)
}

// UnimplementedKeyManagementServer can be embedded to have forward compatible implementations.
@@ -1736,6 +2083,15 @@ func (*UnimplementedKeyManagementServer) ImportRemoteKeys(context.Context, *Impo
func (*UnimplementedKeyManagementServer) DeleteRemoteKeys(context.Context, *DeleteRemoteKeysRequest) (*DeleteRemoteKeysResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method DeleteRemoteKeys not implemented")
}
func (*UnimplementedKeyManagementServer) ListFeeRecipientByPubkey(context.Context, *PubkeyRequest) (*GetFeeRecipientByPubkeyResponse, error) {
	return nil, status.Errorf(codes.Unimplemented, "method ListFeeRecipientByPubkey not implemented")
}
func (*UnimplementedKeyManagementServer) SetFeeRecipientByPubkey(context.Context, *SetFeeRecipientByPubkeyRequest) (*empty.Empty, error) {
	return nil, status.Errorf(codes.Unimplemented, "method SetFeeRecipientByPubkey not implemented")
}
func (*UnimplementedKeyManagementServer) DeleteFeeRecipientByPubkey(context.Context, *PubkeyRequest) (*empty.Empty, error) {
	return nil, status.Errorf(codes.Unimplemented, "method DeleteFeeRecipientByPubkey not implemented")
}

func RegisterKeyManagementServer(s *grpc.Server, srv KeyManagementServer) {
	s.RegisterService(&_KeyManagement_serviceDesc, srv)

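On the server side, embedding UnimplementedKeyManagementServer keeps implementations forward compatible: only the RPCs you care about need overrides, and everything else answers codes.Unimplemented. A hypothetical registration of a minimal ListFeeRecipientByPubkey handler (the listen address and default recipient are placeholders, not Prysm's real validator service):

package main

import (
	"context"
	"log"
	"net"

	"github.com/ethereum/go-ethereum/common"
	service "github.com/prysmaticlabs/prysm/proto/eth/service"
	"google.golang.org/grpc"
)

// feeRecipientServer embeds the generated UnimplementedKeyManagementServer so
// only the new fee-recipient RPC needs an override.
type feeRecipientServer struct {
	service.UnimplementedKeyManagementServer
	defaultRecipient common.Address
}

func (s *feeRecipientServer) ListFeeRecipientByPubkey(ctx context.Context, req *service.PubkeyRequest) (*service.GetFeeRecipientByPubkeyResponse, error) {
	// Echo the requested pubkey and return the configured recipient address.
	return &service.GetFeeRecipientByPubkeyResponse{
		Data: &service.GetFeeRecipientByPubkeyResponse_FeeRecipient{
			Pubkey:     req.GetPubkey(),
			Ethaddress: s.defaultRecipient.Bytes(),
		},
	}, nil
}

func main() {
	lis, err := net.Listen("tcp", "localhost:7000") // address is illustrative
	if err != nil {
		log.Fatal(err)
	}
	srv := grpc.NewServer()
	service.RegisterKeyManagementServer(srv, &feeRecipientServer{
		defaultRecipient: common.HexToAddress("0x046FB65722E7b2455012bfebF6177F4d2e9738D9"),
	})
	log.Fatal(srv.Serve(lis))
}
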
@@ -1849,6 +2205,60 @@ func _KeyManagement_DeleteRemoteKeys_Handler(srv interface{}, ctx context.Contex
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _KeyManagement_ListFeeRecipientByPubkey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(PubkeyRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(KeyManagementServer).ListFeeRecipientByPubkey(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/ethereum.eth.service.KeyManagement/ListFeeRecipientByPubkey",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(KeyManagementServer).ListFeeRecipientByPubkey(ctx, req.(*PubkeyRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _KeyManagement_SetFeeRecipientByPubkey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(SetFeeRecipientByPubkeyRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(KeyManagementServer).SetFeeRecipientByPubkey(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/ethereum.eth.service.KeyManagement/SetFeeRecipientByPubkey",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(KeyManagementServer).SetFeeRecipientByPubkey(ctx, req.(*SetFeeRecipientByPubkeyRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
func _KeyManagement_DeleteFeeRecipientByPubkey_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
|
||||
in := new(PubkeyRequest)
|
||||
if err := dec(in); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if interceptor == nil {
|
||||
return srv.(KeyManagementServer).DeleteFeeRecipientByPubkey(ctx, in)
|
||||
}
|
||||
info := &grpc.UnaryServerInfo{
|
||||
Server: srv,
|
||||
FullMethod: "/ethereum.eth.service.KeyManagement/DeleteFeeRecipientByPubkey",
|
||||
}
|
||||
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
|
||||
return srv.(KeyManagementServer).DeleteFeeRecipientByPubkey(ctx, req.(*PubkeyRequest))
|
||||
}
|
||||
return interceptor(ctx, in, info, handler)
|
||||
}
|
||||
|
||||
var _KeyManagement_serviceDesc = grpc.ServiceDesc{
|
||||
ServiceName: "ethereum.eth.service.KeyManagement",
|
||||
HandlerType: (*KeyManagementServer)(nil),
|
||||
@@ -1877,6 +2287,18 @@ var _KeyManagement_serviceDesc = grpc.ServiceDesc{
|
||||
MethodName: "DeleteRemoteKeys",
|
||||
Handler: _KeyManagement_DeleteRemoteKeys_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "ListFeeRecipientByPubkey",
|
||||
Handler: _KeyManagement_ListFeeRecipientByPubkey_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "SetFeeRecipientByPubkey",
|
||||
Handler: _KeyManagement_SetFeeRecipientByPubkey_Handler,
|
||||
},
|
||||
{
|
||||
MethodName: "DeleteFeeRecipientByPubkey",
|
||||
Handler: _KeyManagement_DeleteFeeRecipientByPubkey_Handler,
|
||||
},
|
||||
},
|
||||
Streams: []grpc.StreamDesc{},
|
||||
Metadata: "proto/eth/service/key_management.proto",
|
||||
|
||||
@@ -209,6 +209,200 @@ func local_request_KeyManagement_DeleteRemoteKeys_0(ctx context.Context, marshal
|
||||
|
||||
}
|
||||
|
||||
func request_KeyManagement_ListFeeRecipientByPubkey_0(ctx context.Context, marshaler runtime.Marshaler, client KeyManagementClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq PubkeyRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
var (
|
||||
val string
|
||||
ok bool
|
||||
err error
|
||||
_ = err
|
||||
)
|
||||
|
||||
val, ok = pathParams["pubkey"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "pubkey")
|
||||
}
|
||||
|
||||
pubkey, err := runtime.Bytes(val)
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "pubkey", err)
|
||||
}
|
||||
protoReq.Pubkey = (pubkey)
|
||||
|
||||
msg, err := client.ListFeeRecipientByPubkey(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
func local_request_KeyManagement_ListFeeRecipientByPubkey_0(ctx context.Context, marshaler runtime.Marshaler, server KeyManagementServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq PubkeyRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
var (
|
||||
val string
|
||||
ok bool
|
||||
err error
|
||||
_ = err
|
||||
)
|
||||
|
||||
val, ok = pathParams["pubkey"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "pubkey")
|
||||
}
|
||||
|
||||
pubkey, err := runtime.Bytes(val)
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "pubkey", err)
|
||||
}
|
||||
protoReq.Pubkey = (pubkey)
|
||||
|
||||
msg, err := server.ListFeeRecipientByPubkey(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
func request_KeyManagement_SetFeeRecipientByPubkey_0(ctx context.Context, marshaler runtime.Marshaler, client KeyManagementClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq SetFeeRecipientByPubkeyRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
newReader, berr := utilities.IOReaderFactory(req.Body)
|
||||
if berr != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
|
||||
}
|
||||
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
|
||||
var (
|
||||
val string
|
||||
ok bool
|
||||
err error
|
||||
_ = err
|
||||
)
|
||||
|
||||
val, ok = pathParams["pubkey"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "pubkey")
|
||||
}
|
||||
|
||||
pubkey, err := runtime.Bytes(val)
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "pubkey", err)
|
||||
}
|
||||
protoReq.Pubkey = (pubkey)
|
||||
|
||||
msg, err := client.SetFeeRecipientByPubkey(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
func local_request_KeyManagement_SetFeeRecipientByPubkey_0(ctx context.Context, marshaler runtime.Marshaler, server KeyManagementServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq SetFeeRecipientByPubkeyRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
newReader, berr := utilities.IOReaderFactory(req.Body)
|
||||
if berr != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
|
||||
}
|
||||
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
|
||||
var (
|
||||
val string
|
||||
ok bool
|
||||
err error
|
||||
_ = err
|
||||
)
|
||||
|
||||
val, ok = pathParams["pubkey"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "pubkey")
|
||||
}
|
||||
|
||||
pubkey, err := runtime.Bytes(val)
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "pubkey", err)
|
||||
}
|
||||
protoReq.Pubkey = (pubkey)
|
||||
|
||||
msg, err := server.SetFeeRecipientByPubkey(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
func request_KeyManagement_DeleteFeeRecipientByPubkey_0(ctx context.Context, marshaler runtime.Marshaler, client KeyManagementClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq PubkeyRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
newReader, berr := utilities.IOReaderFactory(req.Body)
|
||||
if berr != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
|
||||
}
|
||||
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
|
||||
var (
|
||||
val string
|
||||
ok bool
|
||||
err error
|
||||
_ = err
|
||||
)
|
||||
|
||||
val, ok = pathParams["pubkey"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "pubkey")
|
||||
}
|
||||
|
||||
pubkey, err := runtime.Bytes(val)
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "pubkey", err)
|
||||
}
|
||||
protoReq.Pubkey = (pubkey)
|
||||
|
||||
msg, err := client.DeleteFeeRecipientByPubkey(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD))
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
func local_request_KeyManagement_DeleteFeeRecipientByPubkey_0(ctx context.Context, marshaler runtime.Marshaler, server KeyManagementServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) {
|
||||
var protoReq PubkeyRequest
|
||||
var metadata runtime.ServerMetadata
|
||||
|
||||
newReader, berr := utilities.IOReaderFactory(req.Body)
|
||||
if berr != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr)
|
||||
}
|
||||
if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err)
|
||||
}
|
||||
|
||||
var (
|
||||
val string
|
||||
ok bool
|
||||
err error
|
||||
_ = err
|
||||
)
|
||||
|
||||
val, ok = pathParams["pubkey"]
|
||||
if !ok {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "missing parameter %s", "pubkey")
|
||||
}
|
||||
|
||||
pubkey, err := runtime.Bytes(val)
|
||||
if err != nil {
|
||||
return nil, metadata, status.Errorf(codes.InvalidArgument, "type mismatch, parameter: %s, error: %v", "pubkey", err)
|
||||
}
|
||||
protoReq.Pubkey = (pubkey)
|
||||
|
||||
msg, err := server.DeleteFeeRecipientByPubkey(ctx, &protoReq)
|
||||
return msg, metadata, err
|
||||
|
||||
}
|
||||
|
||||
// RegisterKeyManagementHandlerServer registers the http handlers for service KeyManagement to "mux".
|
||||
// UnaryRPC :call KeyManagementServer directly.
|
||||
// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906.
|
||||
@@ -353,6 +547,75 @@ func RegisterKeyManagementHandlerServer(ctx context.Context, mux *runtime.ServeM
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("GET", pattern_KeyManagement_ListFeeRecipientByPubkey_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/ethereum.eth.service.KeyManagement/ListFeeRecipientByPubkey")
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_KeyManagement_ListFeeRecipientByPubkey_0(rctx, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
ctx = runtime.NewServerMetadataContext(ctx, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_KeyManagement_ListFeeRecipientByPubkey_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("POST", pattern_KeyManagement_SetFeeRecipientByPubkey_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/ethereum.eth.service.KeyManagement/SetFeeRecipientByPubkey")
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_KeyManagement_SetFeeRecipientByPubkey_0(rctx, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
ctx = runtime.NewServerMetadataContext(ctx, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_KeyManagement_SetFeeRecipientByPubkey_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("DELETE", pattern_KeyManagement_DeleteFeeRecipientByPubkey_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
var stream runtime.ServerTransportStream
|
||||
ctx = grpc.NewContextWithServerTransportStream(ctx, &stream)
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req, "/ethereum.eth.service.KeyManagement/DeleteFeeRecipientByPubkey")
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := local_request_KeyManagement_DeleteFeeRecipientByPubkey_0(rctx, inboundMarshaler, server, req, pathParams)
|
||||
md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer())
|
||||
ctx = runtime.NewServerMetadataContext(ctx, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_KeyManagement_DeleteFeeRecipientByPubkey_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -514,6 +777,66 @@ func RegisterKeyManagementHandlerClient(ctx context.Context, mux *runtime.ServeM
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("GET", pattern_KeyManagement_ListFeeRecipientByPubkey_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/ethereum.eth.service.KeyManagement/ListFeeRecipientByPubkey")
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_KeyManagement_ListFeeRecipientByPubkey_0(rctx, inboundMarshaler, client, req, pathParams)
|
||||
ctx = runtime.NewServerMetadataContext(ctx, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_KeyManagement_ListFeeRecipientByPubkey_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("POST", pattern_KeyManagement_SetFeeRecipientByPubkey_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/ethereum.eth.service.KeyManagement/SetFeeRecipientByPubkey")
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_KeyManagement_SetFeeRecipientByPubkey_0(rctx, inboundMarshaler, client, req, pathParams)
|
||||
ctx = runtime.NewServerMetadataContext(ctx, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_KeyManagement_SetFeeRecipientByPubkey_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
mux.Handle("DELETE", pattern_KeyManagement_DeleteFeeRecipientByPubkey_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) {
|
||||
ctx, cancel := context.WithCancel(req.Context())
|
||||
defer cancel()
|
||||
inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req)
|
||||
rctx, err := runtime.AnnotateContext(ctx, mux, req, "/ethereum.eth.service.KeyManagement/DeleteFeeRecipientByPubkey")
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
resp, md, err := request_KeyManagement_DeleteFeeRecipientByPubkey_0(rctx, inboundMarshaler, client, req, pathParams)
|
||||
ctx = runtime.NewServerMetadataContext(ctx, md)
|
||||
if err != nil {
|
||||
runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err)
|
||||
return
|
||||
}
|
||||
|
||||
forward_KeyManagement_DeleteFeeRecipientByPubkey_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...)
|
||||
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -529,6 +852,12 @@ var (
	pattern_KeyManagement_ImportRemoteKeys_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"internal", "eth", "v1", "remotekeys"}, ""))

	pattern_KeyManagement_DeleteRemoteKeys_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3}, []string{"internal", "eth", "v1", "remotekeys"}, ""))

	pattern_KeyManagement_ListFeeRecipientByPubkey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4, 2, 5}, []string{"internal", "eth", "v1", "validator", "pubkey", "feerecipient"}, ""))

	pattern_KeyManagement_SetFeeRecipientByPubkey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4, 2, 5}, []string{"internal", "eth", "v1", "validator", "pubkey", "feerecipient"}, ""))

	pattern_KeyManagement_DeleteFeeRecipientByPubkey_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1, 2, 2, 2, 3, 1, 0, 4, 1, 5, 4, 2, 5}, []string{"internal", "eth", "v1", "validator", "pubkey", "feerecipient"}, ""))
)

var (
@@ -543,4 +872,10 @@ var (
	forward_KeyManagement_ImportRemoteKeys_0 = runtime.ForwardResponseMessage

	forward_KeyManagement_DeleteRemoteKeys_0 = runtime.ForwardResponseMessage

	forward_KeyManagement_ListFeeRecipientByPubkey_0 = runtime.ForwardResponseMessage

	forward_KeyManagement_SetFeeRecipientByPubkey_0 = runtime.ForwardResponseMessage

	forward_KeyManagement_DeleteFeeRecipientByPubkey_0 = runtime.ForwardResponseMessage
)

@@ -85,13 +85,26 @@ service KeyManagement {
    };
  }


  // ListRemoteKeys lists all web3signer public validator keys known to the keymanager.
  //
  // HTTP response status codes:
  // - 200: Successful response
  // - 401: Unauthorized
  // - 403: Forbidden from accessing the resource
  // - 500: Validator internal error
  rpc ListRemoteKeys(google.protobuf.Empty) returns (ListRemoteKeysResponse) {
    option (google.api.http) = {
      get: "/internal/eth/v1/remotekeys"
    };
  }

  // ImportRemoteKeys imports and sets web3signer public validator keys in the keymanager.
  //
  // HTTP response status codes:
  // - 200: Successful response
  // - 401: Unauthorized
  // - 403: Forbidden from accessing the resource
  // - 500: Validator internal error
  rpc ImportRemoteKeys(ImportRemoteKeysRequest) returns (ImportRemoteKeysResponse) {
    option (google.api.http) = {
      post: "/internal/eth/v1/remotekeys",
@@ -99,12 +112,61 @@ service KeyManagement {
    };
  }


  // DeleteRemoteKeys removes web3signer public validator keys from the keymanager.
  //
  // HTTP response status codes:
  // - 200: Successful response
  // - 401: Unauthorized
  // - 403: Forbidden from accessing the resource
  // - 500: Validator internal error
  rpc DeleteRemoteKeys(DeleteRemoteKeysRequest) returns (DeleteRemoteKeysResponse) {
    option (google.api.http) = {
      delete: "/internal/eth/v1/remotekeys",
      body: "*"
    };
  }

  // ListFeeRecipientByPubkey returns the hex-encoded fee recipient address for the given pubkey.
  //
  // HTTP response status codes:
  // - 200: Successful response
  // - 401: Unauthorized
  // - 403: Forbidden from accessing the resource
  // - 500: Validator internal error
  rpc ListFeeRecipientByPubkey(PubkeyRequest) returns (GetFeeRecipientByPubkeyResponse) {
    option (google.api.http) = {
      get: "/internal/eth/v1/validator/{pubkey}/feerecipient"
    };
  }

  // SetFeeRecipientByPubkey sets the fee recipient for the given public key, overriding any existing one.
  //
  // HTTP response status codes:
  // - 202: Successful response
  // - 401: Unauthorized
  // - 403: Forbidden from accessing the resource
  // - 500: Validator internal error
  rpc SetFeeRecipientByPubkey(SetFeeRecipientByPubkeyRequest) returns (google.protobuf.Empty) {
    option (google.api.http) = {
      post: "/internal/eth/v1/validator/{pubkey}/feerecipient",
      body: "*"
    };
  }

  // DeleteFeeRecipientByPubkey deletes the current fee recipient setting for the public key and falls back to the default fee recipient.
  //
  // HTTP response status codes:
  // - 204: No Content
  // - 401: Unauthorized
  // - 403: Forbidden from accessing the resource
  // - 500: Validator internal error
  rpc DeleteFeeRecipientByPubkey(PubkeyRequest) returns (google.protobuf.Empty) {
    option (google.api.http) = {
      delete: "/internal/eth/v1/validator/{pubkey}/feerecipient",
      body: "*"
    };
  }
}

message ListKeystoresResponse {
@@ -206,3 +268,20 @@ message DeletedRemoteKeysStatus {
  string message = 2;
}


message PubkeyRequest {
  bytes pubkey = 1;
}

message GetFeeRecipientByPubkeyResponse {
  message FeeRecipient {
    bytes pubkey = 1;
    bytes ethaddress = 2;
  }
  FeeRecipient data = 1;
}

message SetFeeRecipientByPubkeyRequest {
  bytes pubkey = 1;
  bytes ethaddress = 2;
}

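The three fee-recipient RPCs above are exposed over HTTP through the gRPC gateway and the validator API middleware: GET maps to ListFeeRecipientByPubkey, POST to SetFeeRecipientByPubkey, and DELETE to DeleteFeeRecipientByPubkey. Below is a minimal sketch of a client hitting the GET route; the host, port, and pubkey are placeholders, and the auth token the keymanager API normally requires is omitted.

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Placeholder pubkey; substitute a real hex-encoded BLS public key.
	pubkey := "0x93247f2209abcacf57b75a51dafae777f9dd38bc7053d1af526f220a7489a6d3a2753e5f3e8b1cfe39b56f43611df74a"
	// Host and port are placeholders for the validator's HTTP API address.
	url := "http://localhost:7500/eth/v1/validator/" + pubkey + "/feerecipient"

	resp, err := http.Get(url) // GET -> ListFeeRecipientByPubkey
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.Status, string(body))
}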
@@ -164,12 +164,11 @@ func Since(time time.Time) types.Slot {
// CurrentSlot returns the current slot as determined by the local clock and
// provided genesis time.
func CurrentSlot(genesisTimeSec uint64) types.Slot {
	now := prysmTime.Now().Unix()
	genesis := int64(genesisTimeSec) // lint:ignore uintcast -- Genesis timestamp will not exceed int64 in your lifetime.
	if now < genesis {
	now := uint64(prysmTime.Now().Unix())
	if now < genesisTimeSec {
		return 0
	}
	return types.Slot(uint64(now-genesis) / params.BeaconConfig().SecondsPerSlot)
	return types.Slot((now - genesisTimeSec) / params.BeaconConfig().SecondsPerSlot)
}

// ValidateClock validates a provided slot against the local

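The rewrite above drops the int64 round trip and keeps the whole computation in uint64: clamp to slot 0 before genesis, then integer-divide the elapsed seconds by the slot duration. A self-contained sketch of that arithmetic, assuming the mainnet value of 12 seconds per slot, is:

package main

import (
	"fmt"
	"time"
)

// currentSlot mirrors the logic above: return 0 before genesis, otherwise
// divide the elapsed seconds by the slot duration (12s is assumed here).
func currentSlot(genesisTimeSec, secondsPerSlot uint64) uint64 {
	now := uint64(time.Now().Unix())
	if now < genesisTimeSec {
		return 0
	}
	return (now - genesisTimeSec) / secondsPerSlot
}

func main() {
	genesis := uint64(time.Now().Add(-25 * time.Second).Unix())
	fmt.Println(currentSlot(genesis, 12)) // 25s elapsed / 12s per slot = slot 2
}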
@@ -14,17 +14,13 @@ import (
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/cmd/validator/flags"
|
||||
"github.com/prysmaticlabs/prysm/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
"github.com/prysmaticlabs/prysm/io/file"
|
||||
"github.com/prysmaticlabs/prysm/io/prompt"
|
||||
ethpbservice "github.com/prysmaticlabs/prysm/proto/eth/service"
|
||||
"github.com/prysmaticlabs/prysm/validator/accounts/iface"
|
||||
"github.com/prysmaticlabs/prysm/validator/accounts/userprompt"
|
||||
"github.com/prysmaticlabs/prysm/validator/accounts/wallet"
|
||||
"github.com/prysmaticlabs/prysm/validator/keymanager"
|
||||
"github.com/urfave/cli/v2"
|
||||
keystorev4 "github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4"
|
||||
)
|
||||
|
||||
@@ -81,93 +77,31 @@ type ImportAccountsConfig struct {
|
||||
// ImportAccountsCli can import external, EIP-2335 compliant keystore.json files as
|
||||
// new accounts into the Prysm validator wallet. This uses the CLI to extract
|
||||
// values necessary to run the function.
|
||||
func ImportAccountsCli(cliCtx *cli.Context) error {
|
||||
w, err := wallet.OpenWalletOrElseCli(cliCtx, func(cliCtx *cli.Context) (*wallet.Wallet, error) {
|
||||
walletDir, err := userprompt.InputDirectory(cliCtx, userprompt.WalletDirPromptText, flags.WalletDirFlag)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
exists, err := wallet.Exists(walletDir)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, wallet.CheckExistsErrMsg)
|
||||
}
|
||||
if exists {
|
||||
isValid, err := wallet.IsValid(walletDir)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, wallet.CheckValidityErrMsg)
|
||||
}
|
||||
if !isValid {
|
||||
return nil, errors.New(wallet.InvalidWalletErrMsg)
|
||||
}
|
||||
walletPassword, err := wallet.InputPassword(
|
||||
cliCtx,
|
||||
flags.WalletPasswordFileFlag,
|
||||
wallet.PasswordPromptText,
|
||||
false, /* Do not confirm password */
|
||||
wallet.ValidateExistingPass,
|
||||
)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return wallet.OpenWallet(cliCtx.Context, &wallet.Config{
|
||||
WalletDir: walletDir,
|
||||
WalletPassword: walletPassword,
|
||||
})
|
||||
}
|
||||
|
||||
cfg, err := extractWalletCreationConfigFromCli(cliCtx, keymanager.Local)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
w := wallet.New(&wallet.Config{
|
||||
KeymanagerKind: cfg.WalletCfg.KeymanagerKind,
|
||||
WalletDir: cfg.WalletCfg.WalletDir,
|
||||
WalletPassword: cfg.WalletCfg.WalletPassword,
|
||||
})
|
||||
if err = createLocalKeymanagerWallet(cliCtx.Context, w); err != nil {
|
||||
return nil, errors.Wrap(err, "could not create keymanager")
|
||||
}
|
||||
log.WithField("wallet-path", cfg.WalletCfg.WalletDir).Info(
|
||||
"Successfully created new wallet",
|
||||
)
|
||||
return w, nil
|
||||
})
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not initialize wallet")
|
||||
}
|
||||
|
||||
km, err := w.InitializeKeymanager(cliCtx.Context, iface.InitKeymanagerConfig{ListenForChanges: false})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
k, ok := km.(keymanager.Importer)
|
||||
func (acm *AccountsCLIManager) Import(ctx context.Context) error {
|
||||
k, ok := acm.keymanager.(keymanager.Importer)
|
||||
if !ok {
|
||||
return errors.New("keymanager cannot import keystores")
|
||||
}
|
||||
|
||||
// Check if the user wishes to import a one-off private key directly
|
||||
// as an account into the Prysm validator.
|
||||
if cliCtx.IsSet(flags.ImportPrivateKeyFileFlag.Name) {
|
||||
return importPrivateKeyAsAccount(cliCtx, w, k)
|
||||
if acm.importPrivateKeys {
|
||||
return importPrivateKeyAsAccount(ctx, acm.wallet, k, acm.privateKeyFile)
|
||||
}
|
||||
|
||||
keysDir, err := userprompt.InputDirectory(cliCtx, userprompt.ImportKeysDirPromptText, flags.KeysDirFlag)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not parse keys directory")
|
||||
}
|
||||
// Consider that the keysDir might be a path to a specific file and handle accordingly.
|
||||
isDir, err := file.HasDir(keysDir)
|
||||
isDir, err := file.HasDir(acm.keysDir)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not determine if path is a directory")
|
||||
}
|
||||
keystoresImported := make([]*keymanager.Keystore, 0)
|
||||
if isDir {
|
||||
files, err := os.ReadDir(keysDir)
|
||||
files, err := os.ReadDir(acm.keysDir)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not read dir")
|
||||
}
|
||||
if len(files) == 0 {
|
||||
return fmt.Errorf("directory %s has no files, cannot import from it", keysDir)
|
||||
return fmt.Errorf("directory %s has no files, cannot import from it", acm.keysDir)
|
||||
}
|
||||
filesInDir := make([]string, 0)
|
||||
for i := 0; i < len(files); i++ {
|
||||
@@ -180,7 +114,7 @@ func ImportAccountsCli(cliCtx *cli.Context) error {
|
||||
// specify this value in their filename.
|
||||
sort.Sort(byDerivationPath(filesInDir))
|
||||
for _, name := range filesInDir {
|
||||
keystore, err := readKeystoreFile(cliCtx.Context, filepath.Join(keysDir, name))
|
||||
keystore, err := readKeystoreFile(ctx, filepath.Join(acm.keysDir, name))
|
||||
if err != nil && strings.Contains(err.Error(), "could not decode keystore json") {
|
||||
continue
|
||||
} else if err != nil {
|
||||
@@ -189,7 +123,7 @@ func ImportAccountsCli(cliCtx *cli.Context) error {
|
||||
keystoresImported = append(keystoresImported, keystore)
|
||||
}
|
||||
} else {
|
||||
keystore, err := readKeystoreFile(cliCtx.Context, keysDir)
|
||||
keystore, err := readKeystoreFile(ctx, acm.keysDir)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not import keystore")
|
||||
}
|
||||
@@ -197,9 +131,8 @@ func ImportAccountsCli(cliCtx *cli.Context) error {
|
||||
}
|
||||
|
||||
var accountsPassword string
|
||||
if cliCtx.IsSet(flags.AccountPasswordFileFlag.Name) {
|
||||
passwordFilePath := cliCtx.String(flags.AccountPasswordFileFlag.Name)
|
||||
data, err := os.ReadFile(passwordFilePath) // #nosec G304
|
||||
if acm.readPasswordFile {
|
||||
data, err := os.ReadFile(acm.passwordFilePath) // #nosec G304
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -213,7 +146,7 @@ func ImportAccountsCli(cliCtx *cli.Context) error {
|
||||
}
|
||||
}
|
||||
fmt.Println("Importing accounts, this may take a while...")
|
||||
statuses, err := ImportAccounts(cliCtx.Context, &ImportAccountsConfig{
|
||||
statuses, err := ImportAccounts(ctx, &ImportAccountsConfig{
|
||||
Importer: k,
|
||||
Keystores: keystoresImported,
|
||||
AccountPassword: accountsPassword,
|
||||
@@ -265,8 +198,7 @@ func ImportAccounts(ctx context.Context, cfg *ImportAccountsConfig) ([]*ethpbser
|
||||
|
||||
// Imports a one-off file containing a private key as a hex string into
|
||||
// the Prysm validator's accounts.
|
||||
func importPrivateKeyAsAccount(cliCtx *cli.Context, wallet *wallet.Wallet, importer keymanager.Importer) error {
|
||||
privKeyFile := cliCtx.String(flags.ImportPrivateKeyFileFlag.Name)
|
||||
func importPrivateKeyAsAccount(ctx context.Context, wallet *wallet.Wallet, importer keymanager.Importer, privKeyFile string) error {
|
||||
fullPath, err := file.ExpandPath(privKeyFile)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not expand file path for %s", privKeyFile)
|
||||
@@ -297,7 +229,7 @@ func importPrivateKeyAsAccount(cliCtx *cli.Context, wallet *wallet.Wallet, impor
|
||||
return errors.Wrap(err, "could not encrypt private key into a keystore file")
|
||||
}
|
||||
statuses, err := ImportAccounts(
|
||||
cliCtx.Context,
|
||||
ctx,
|
||||
&ImportAccountsConfig{
|
||||
Importer: importer,
|
||||
AccountPassword: wallet.Password(),
|
||||
|
||||
@@ -2,15 +2,12 @@ package accounts
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sort"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/uuid"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
@@ -27,111 +24,6 @@ import (
|
||||
keystorev4 "github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4"
|
||||
)
|
||||
|
||||
func TestImport_Noninteractive(t *testing.T) {
|
||||
local.ResetCaches()
|
||||
walletDir, passwordsDir, passwordFilePath := setupWalletAndPasswordsDir(t)
|
||||
keysDir := filepath.Join(t.TempDir(), "keysDir")
|
||||
require.NoError(t, os.MkdirAll(keysDir, os.ModePerm))
|
||||
|
||||
cliCtx := setupWalletCtx(t, &testWalletConfig{
|
||||
walletDir: walletDir,
|
||||
passwordsDir: passwordsDir,
|
||||
keysDir: keysDir,
|
||||
keymanagerKind: keymanager.Local,
|
||||
walletPasswordFile: passwordFilePath,
|
||||
accountPasswordFile: passwordFilePath,
|
||||
})
|
||||
w, err := CreateWalletWithKeymanager(cliCtx.Context, &CreateWalletConfig{
|
||||
WalletCfg: &wallet.Config{
|
||||
WalletDir: walletDir,
|
||||
KeymanagerKind: keymanager.Local,
|
||||
WalletPassword: password,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
keymanager, err := local.NewKeymanager(
|
||||
cliCtx.Context,
|
||||
&local.SetupConfig{
|
||||
Wallet: w,
|
||||
ListenForChanges: false,
|
||||
},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Make sure there are no accounts at the start.
|
||||
accounts, err := keymanager.ValidatingAccountNames()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, len(accounts), 0)
|
||||
|
||||
// Create 2 keys.
|
||||
createKeystore(t, keysDir)
|
||||
time.Sleep(time.Second)
|
||||
createKeystore(t, keysDir)
|
||||
|
||||
require.NoError(t, ImportAccountsCli(cliCtx))
|
||||
|
||||
w, err = wallet.OpenWallet(cliCtx.Context, &wallet.Config{
|
||||
WalletDir: walletDir,
|
||||
WalletPassword: password,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
km, err := w.InitializeKeymanager(cliCtx.Context, iface.InitKeymanagerConfig{ListenForChanges: false})
|
||||
require.NoError(t, err)
|
||||
keys, err := km.FetchValidatingPublicKeys(cliCtx.Context)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, 2, len(keys))
|
||||
}
|
||||
|
||||
// TestImport_DuplicateKeys is a regression test that ensures correct behavior when duplicate keys are imported
|
||||
func TestImport_DuplicateKeys(t *testing.T) {
|
||||
local.ResetCaches()
|
||||
walletDir, passwordsDir, passwordFilePath := setupWalletAndPasswordsDir(t)
|
||||
keysDir := filepath.Join(t.TempDir(), "keysDir")
|
||||
require.NoError(t, os.MkdirAll(keysDir, os.ModePerm))
|
||||
|
||||
cliCtx := setupWalletCtx(t, &testWalletConfig{
|
||||
walletDir: walletDir,
|
||||
passwordsDir: passwordsDir,
|
||||
keysDir: keysDir,
|
||||
keymanagerKind: keymanager.Local,
|
||||
walletPasswordFile: passwordFilePath,
|
||||
accountPasswordFile: passwordFilePath,
|
||||
})
|
||||
w, err := CreateWalletWithKeymanager(cliCtx.Context, &CreateWalletConfig{
|
||||
WalletCfg: &wallet.Config{
|
||||
WalletDir: walletDir,
|
||||
KeymanagerKind: keymanager.Local,
|
||||
WalletPassword: password,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Create a key and then copy it to create a duplicate
|
||||
_, keystorePath := createKeystore(t, keysDir)
|
||||
time.Sleep(time.Second)
|
||||
input, err := os.ReadFile(keystorePath)
|
||||
require.NoError(t, err)
|
||||
keystorePath2 := filepath.Join(keysDir, "copyOfKeystore.json")
|
||||
err = os.WriteFile(keystorePath2, input, os.ModePerm)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.NoError(t, ImportAccountsCli(cliCtx))
|
||||
|
||||
_, err = wallet.OpenWallet(cliCtx.Context, &wallet.Config{
|
||||
WalletDir: walletDir,
|
||||
WalletPassword: password,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
km, err := w.InitializeKeymanager(cliCtx.Context, iface.InitKeymanagerConfig{ListenForChanges: false})
|
||||
require.NoError(t, err)
|
||||
keys, err := km.FetchValidatingPublicKeys(cliCtx.Context)
|
||||
require.NoError(t, err)
|
||||
|
||||
// There should only be 1 account as the duplicate keystore was ignored
|
||||
assert.Equal(t, 1, len(keys))
|
||||
}
|
||||
|
||||
func TestImportAccounts_NoPassword(t *testing.T) {
|
||||
local.ResetCaches()
|
||||
walletDir, passwordsDir, passwordFilePath := setupWalletAndPasswordsDir(t)
|
||||
@@ -166,115 +58,6 @@ func TestImportAccounts_NoPassword(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(resp))
|
||||
require.Equal(t, resp[0].Status, ethpbservice.ImportedKeystoreStatus_ERROR)
|
||||
|
||||
}
|
||||
|
||||
func TestImport_Noninteractive_RandomName(t *testing.T) {
|
||||
local.ResetCaches()
|
||||
walletDir, passwordsDir, passwordFilePath := setupWalletAndPasswordsDir(t)
|
||||
keysDir := filepath.Join(t.TempDir(), "keysDir")
|
||||
require.NoError(t, os.MkdirAll(keysDir, os.ModePerm))
|
||||
|
||||
cliCtx := setupWalletCtx(t, &testWalletConfig{
|
||||
walletDir: walletDir,
|
||||
passwordsDir: passwordsDir,
|
||||
keysDir: keysDir,
|
||||
keymanagerKind: keymanager.Local,
|
||||
walletPasswordFile: passwordFilePath,
|
||||
accountPasswordFile: passwordFilePath,
|
||||
})
|
||||
w, err := CreateWalletWithKeymanager(cliCtx.Context, &CreateWalletConfig{
|
||||
WalletCfg: &wallet.Config{
|
||||
WalletDir: walletDir,
|
||||
KeymanagerKind: keymanager.Local,
|
||||
WalletPassword: password,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
keymanager, err := local.NewKeymanager(
|
||||
cliCtx.Context,
|
||||
&local.SetupConfig{
|
||||
Wallet: w,
|
||||
ListenForChanges: false,
|
||||
},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Make sure there are no accounts at the start.
|
||||
accounts, err := keymanager.ValidatingAccountNames()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, len(accounts), 0)
|
||||
|
||||
// Create 2 keys.
|
||||
createRandomNameKeystore(t, keysDir)
|
||||
time.Sleep(time.Second)
|
||||
createRandomNameKeystore(t, keysDir)
|
||||
|
||||
require.NoError(t, ImportAccountsCli(cliCtx))
|
||||
|
||||
w, err = wallet.OpenWallet(cliCtx.Context, &wallet.Config{
|
||||
WalletDir: walletDir,
|
||||
WalletPassword: password,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
km, err := w.InitializeKeymanager(cliCtx.Context, iface.InitKeymanagerConfig{ListenForChanges: false})
|
||||
require.NoError(t, err)
|
||||
keys, err := km.FetchValidatingPublicKeys(cliCtx.Context)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, 2, len(keys))
|
||||
}
|
||||
|
||||
func TestImport_Noninteractive_Filepath(t *testing.T) {
|
||||
local.ResetCaches()
|
||||
walletDir, passwordsDir, passwordFilePath := setupWalletAndPasswordsDir(t)
|
||||
keysDir := filepath.Join(t.TempDir(), "keysDir")
|
||||
require.NoError(t, os.MkdirAll(keysDir, os.ModePerm))
|
||||
|
||||
_, keystorePath := createKeystore(t, keysDir)
|
||||
cliCtx := setupWalletCtx(t, &testWalletConfig{
|
||||
walletDir: walletDir,
|
||||
passwordsDir: passwordsDir,
|
||||
keysDir: keystorePath,
|
||||
keymanagerKind: keymanager.Local,
|
||||
walletPasswordFile: passwordFilePath,
|
||||
accountPasswordFile: passwordFilePath,
|
||||
})
|
||||
w, err := CreateWalletWithKeymanager(cliCtx.Context, &CreateWalletConfig{
|
||||
WalletCfg: &wallet.Config{
|
||||
WalletDir: walletDir,
|
||||
KeymanagerKind: keymanager.Local,
|
||||
WalletPassword: password,
|
||||
},
|
||||
})
|
||||
require.NoError(t, err)
|
||||
keymanager, err := local.NewKeymanager(
|
||||
cliCtx.Context,
|
||||
&local.SetupConfig{
|
||||
Wallet: w,
|
||||
ListenForChanges: false,
|
||||
},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Make sure there are no accounts at the start.
|
||||
accounts, err := keymanager.ValidatingAccountNames()
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, len(accounts), 0)
|
||||
|
||||
require.NoError(t, ImportAccountsCli(cliCtx))
|
||||
|
||||
w, err = wallet.OpenWallet(cliCtx.Context, &wallet.Config{
|
||||
WalletDir: walletDir,
|
||||
WalletPassword: password,
|
||||
})
|
||||
require.NoError(t, err)
|
||||
km, err := w.InitializeKeymanager(cliCtx.Context, iface.InitKeymanagerConfig{ListenForChanges: false})
|
||||
require.NoError(t, err)
|
||||
keys, err := km.FetchValidatingPublicKeys(cliCtx.Context)
|
||||
require.NoError(t, err)
|
||||
|
||||
assert.Equal(t, 1, len(keys))
|
||||
}
|
||||
|
||||
func TestImport_SortByDerivationPath(t *testing.T) {
|
||||
@@ -378,7 +161,7 @@ func Test_importPrivateKeyAsAccount(t *testing.T) {
|
||||
},
|
||||
)
|
||||
require.NoError(t, err)
|
||||
assert.NoError(t, importPrivateKeyAsAccount(cliCtx, wallet, keymanager))
|
||||
assert.NoError(t, importPrivateKeyAsAccount(cliCtx.Context, wallet, keymanager, privKeyFileName))
|
||||
|
||||
// We re-instantiate the keymanager and check we now have 1 public key.
|
||||
keymanager, err = local.NewKeymanager(
|
||||
@@ -419,29 +202,3 @@ func createKeystore(t *testing.T, path string) (*keymanager.Keystore, string) {
|
||||
require.NoError(t, os.WriteFile(fullPath, encoded, os.ModePerm))
|
||||
return keystoreFile, fullPath
|
||||
}
|
||||
|
||||
// Returns the fullPath to the newly created keystore file.
|
||||
func createRandomNameKeystore(t *testing.T, path string) (*keymanager.Keystore, string) {
|
||||
validatingKey, err := bls.RandKey()
|
||||
require.NoError(t, err)
|
||||
encryptor := keystorev4.New()
|
||||
cryptoFields, err := encryptor.Encrypt(validatingKey.Marshal(), password)
|
||||
require.NoError(t, err)
|
||||
id, err := uuid.NewRandom()
|
||||
require.NoError(t, err)
|
||||
keystoreFile := &keymanager.Keystore{
|
||||
Crypto: cryptoFields,
|
||||
ID: id.String(),
|
||||
Pubkey: fmt.Sprintf("%x", validatingKey.PublicKey().Marshal()),
|
||||
Version: encryptor.Version(),
|
||||
Name: encryptor.Name(),
|
||||
}
|
||||
encoded, err := json.MarshalIndent(keystoreFile, "", "\t")
|
||||
require.NoError(t, err)
|
||||
// Write the encoded keystore to disk with a random number appended to the file name
|
||||
random, err := rand.Int(rand.Reader, big.NewInt(1000000))
|
||||
require.NoError(t, err)
|
||||
fullPath := filepath.Join(path, fmt.Sprintf("test-%d-keystore", random.Int64()))
|
||||
require.NoError(t, os.WriteFile(fullPath, encoded, os.ModePerm))
|
||||
return keystoreFile, fullPath
|
||||
}
|
||||
|
||||
@@ -32,10 +32,15 @@ type AccountsCLIManager struct {
	showPrivateKeys      bool
	listValidatorIndices bool
	deletePublicKeys     bool
	importPrivateKeys    bool
	readPasswordFile     bool
	dialOpts             []grpc.DialOption
	grpcHeaders          []string
	beaconRPCProvider    string
	walletKeyCount       int
	privateKeyFile       string
	passwordFilePath     string
	keysDir              string
	backupsDir           string
	backupsPassword      string
	filteredPubKeys      []bls.PublicKey

@@ -90,6 +90,46 @@ func WithDeletePublicKeys(deletePublicKeys bool) Option {
	}
}

// WithReadPasswordFile indicates whether to read the password from a file.
func WithReadPasswordFile(readPasswordFile bool) Option {
	return func(acc *AccountsCLIManager) error {
		acc.readPasswordFile = readPasswordFile
		return nil
	}
}

// WithImportPrivateKeys indicates whether to import private keys as accounts.
func WithImportPrivateKeys(importPrivateKeys bool) Option {
	return func(acc *AccountsCLIManager) error {
		acc.importPrivateKeys = importPrivateKeys
		return nil
	}
}

// WithPrivateKeyFile specifies the private key path.
func WithPrivateKeyFile(privateKeyFile string) Option {
	return func(acc *AccountsCLIManager) error {
		acc.privateKeyFile = privateKeyFile
		return nil
	}
}

// WithKeysDir specifies the directory keys are read from.
func WithKeysDir(keysDir string) Option {
	return func(acc *AccountsCLIManager) error {
		acc.keysDir = keysDir
		return nil
	}
}

// WithPasswordFilePath specifies where the password is stored.
func WithPasswordFilePath(passwordFilePath string) Option {
	return func(acc *AccountsCLIManager) error {
		acc.passwordFilePath = passwordFilePath
		return nil
	}
}

// WithBackupsDir specifies the directory backups are written to.
func WithBackupsDir(backupsDir string) Option {
	return func(acc *AccountsCLIManager) error {

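The diff converts the import flow to the functional-options pattern: each With* helper returns an Option closure that mutates one field of AccountsCLIManager. A minimal sketch of how such options might be applied is shown below; applyOpts and the zero-value manager are illustrative only, and the real constructor in the Prysm codebase may differ.

// applyOpts is a hypothetical helper demonstrating the Option pattern above.
func applyOpts(acm *AccountsCLIManager, opts ...Option) (*AccountsCLIManager, error) {
	for _, opt := range opts {
		if err := opt(acm); err != nil {
			return nil, err
		}
	}
	return acm, nil
}

// Example usage (paths are placeholders):
//
//	acm, err := applyOpts(&AccountsCLIManager{},
//		WithKeysDir("/path/to/keystores"),
//		WithReadPasswordFile(true),
//		WithPasswordFilePath("/path/to/password.txt"),
//	)
//	if err != nil { /* handle error */ }
//	err = acm.Import(ctx)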
@@ -40,7 +40,7 @@ func CreateAndSaveWalletCli(cliCtx *cli.Context) (*wallet.Wallet, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
createWalletConfig, err := extractWalletCreationConfigFromCli(cliCtx, keymanagerKind)
|
||||
createWalletConfig, err := ExtractWalletCreationConfigFromCli(cliCtx, keymanagerKind)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -72,7 +72,7 @@ func CreateWalletWithKeymanager(ctx context.Context, cfg *CreateWalletConfig) (*
|
||||
var err error
|
||||
switch w.KeymanagerKind() {
|
||||
case keymanager.Local:
|
||||
if err = createLocalKeymanagerWallet(ctx, w); err != nil {
|
||||
if err = CreateLocalKeymanagerWallet(ctx, w); err != nil {
|
||||
return nil, errors.Wrap(err, "could not initialize wallet")
|
||||
}
|
||||
// TODO(#9883) - Remove this when we have a better way to handle this. should be safe to use for now.
|
||||
@@ -131,7 +131,8 @@ func extractKeymanagerKindFromCli(cliCtx *cli.Context) (keymanager.Kind, error)
|
||||
return inputKeymanagerKind(cliCtx)
|
||||
}
|
||||
|
||||
func extractWalletCreationConfigFromCli(cliCtx *cli.Context, keymanagerKind keymanager.Kind) (*CreateWalletConfig, error) {
|
||||
// ExtractWalletCreationConfigFromCli prompts the user for wallet creation input.
|
||||
func ExtractWalletCreationConfigFromCli(cliCtx *cli.Context, keymanagerKind keymanager.Kind) (*CreateWalletConfig, error) {
|
||||
walletDir, err := userprompt.InputDirectory(cliCtx, userprompt.WalletDirPromptText, flags.WalletDirFlag)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -204,7 +205,7 @@ func extractWalletCreationConfigFromCli(cliCtx *cli.Context, keymanagerKind keym
|
||||
return createWalletConfig, nil
|
||||
}
|
||||
|
||||
func createLocalKeymanagerWallet(_ context.Context, wallet *wallet.Wallet) error {
|
||||
func CreateLocalKeymanagerWallet(_ context.Context, wallet *wallet.Wallet) error {
|
||||
if wallet == nil {
|
||||
return errors.New("nil wallet")
|
||||
}
|
||||
|
||||
@@ -119,7 +119,7 @@ func TestCreateOrOpenWallet(t *testing.T) {
|
||||
walletPasswordFile: walletPasswordFile,
|
||||
})
|
||||
createLocalWallet := func(cliCtx *cli.Context) (*wallet.Wallet, error) {
|
||||
cfg, err := extractWalletCreationConfigFromCli(cliCtx, keymanager.Local)
|
||||
cfg, err := ExtractWalletCreationConfigFromCli(cliCtx, keymanager.Local)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -128,7 +128,7 @@ func TestCreateOrOpenWallet(t *testing.T) {
|
||||
WalletDir: cfg.WalletCfg.WalletDir,
|
||||
WalletPassword: cfg.WalletCfg.WalletPassword,
|
||||
})
|
||||
if err = createLocalKeymanagerWallet(cliCtx.Context, w); err != nil {
|
||||
if err = CreateLocalKeymanagerWallet(cliCtx.Context, w); err != nil {
|
||||
return nil, errors.Wrap(err, "could not create keymanager")
|
||||
}
|
||||
log.WithField("wallet-path", cfg.WalletCfg.WalletDir).Info(
|
||||
|
||||
@@ -10,7 +10,6 @@ import (
	types "github.com/prysmaticlabs/prysm/consensus-types/primitives"
	ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
	validatorpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/validator-client"
	"github.com/prysmaticlabs/prysm/time/slots"
	"go.opencensus.io/trace"
	"google.golang.org/protobuf/types/known/emptypb"
)
@@ -58,20 +57,18 @@ func signValidatorRegistration(
	signer signingFunc,
	reg *ethpb.ValidatorRegistrationV1,
) ([]byte, error) {
	req := &ethpb.DomainRequest{
		Epoch:  slots.ToEpoch(slot),
		Domain: params.BeaconConfig().DomainApplicationBuilder[:],
	}

	domain, err := validatorClient.DomainData(ctx, req)
	// Per the spec, the fork version and genesis validators root should be nil,
	// which default to the genesis fork version and a zero root.
	d, err := signing.ComputeDomain(
		params.BeaconConfig().DomainApplicationBuilder,
		nil, /* fork version */
		nil /* genesis val root */)
	if err != nil {
		return nil, errors.Wrap(err, domainDataErr)
	}
	if domain == nil {
		return nil, errors.New(domainDataErr)
		return nil, err
	}

	r, err := signing.ComputeSigningRoot(reg, domain.SignatureDomain)
	r, err := signing.ComputeSigningRoot(reg, d)
	if err != nil {
		return nil, errors.Wrap(err, signingRootErr)
	}
@@ -79,7 +76,7 @@ func signValidatorRegistration(
	sig, err := signer(ctx, &validatorpb.SignRequest{
		PublicKey:       reg.Pubkey,
		SigningRoot:     r[:],
		SignatureDomain: domain.SignatureDomain,
		SignatureDomain: d,
		Object:          &validatorpb.SignRequest_Registration{Registration: reg},
	})
	if err != nil {

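With this change the validator no longer asks the beacon node for DomainData; the application-builder signing domain is computed locally with a nil fork version and a nil genesis validators root. A hedged sketch of the same computation in isolation follows; the import paths are assumed to match the Prysm module layout at the time of this diff.

package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/beacon-chain/core/signing"
	"github.com/prysmaticlabs/prysm/config/params"
)

func main() {
	// Nil fork version and genesis validators root fall back to the genesis
	// fork version and a zero root, matching the call in the diff above.
	d, err := signing.ComputeDomain(
		params.BeaconConfig().DomainApplicationBuilder,
		nil, // fork version
		nil, // genesis validators root
	)
	if err != nil {
		panic(err)
	}
	fmt.Printf("application builder domain: %#x\n", d)
}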
@@ -32,11 +32,6 @@ func TestSubmitValidatorRegistration(t *testing.T) {
|
||||
GetGenesis(gomock.Any(), &emptypb.Empty{}).
|
||||
Return(ðpb.Genesis{GenesisTime: ti}, nil)
|
||||
|
||||
m.validatorClient.EXPECT().DomainData(
|
||||
gomock.Any(), // ctx
|
||||
ðpb.DomainRequest{Domain: params.BeaconConfig().DomainApplicationBuilder[:]},
|
||||
).Times(1).Return(ðpb.DomainResponse{SignatureDomain: make([]byte, 32)}, nil /*err*/)
|
||||
|
||||
m.validatorClient.EXPECT().
|
||||
SubmitValidatorRegistration(gomock.Any(), ðpb.SignedValidatorRegistrationV1{
|
||||
Message: reg,
|
||||
@@ -46,31 +41,6 @@ func TestSubmitValidatorRegistration(t *testing.T) {
|
||||
require.NoError(t, nil, SubmitValidatorRegistration(ctx, m.validatorClient, m.nodeClient, m.signfunc, reg))
|
||||
}
|
||||
|
||||
func TestSubmitValidatorRegistration_InvalidDomain(t *testing.T) {
|
||||
_, m, validatorKey, finish := setup(t)
|
||||
defer finish()
|
||||
|
||||
ctx := context.Background()
|
||||
reg := ðpb.ValidatorRegistrationV1{
|
||||
FeeRecipient: bytesutil.PadTo([]byte("fee"), 20),
|
||||
GasLimit: 123456,
|
||||
Timestamp: uint64(time.Now().Unix()),
|
||||
Pubkey: validatorKey.PublicKey().Marshal(),
|
||||
}
|
||||
|
||||
genesisTime := ×tamppb.Timestamp{}
|
||||
m.nodeClient.EXPECT().
|
||||
GetGenesis(gomock.Any(), &emptypb.Empty{}).
|
||||
Return(ðpb.Genesis{GenesisTime: genesisTime}, nil)
|
||||
|
||||
m.validatorClient.EXPECT().DomainData(
|
||||
gomock.Any(), // ctx
|
||||
ðpb.DomainRequest{Domain: params.BeaconConfig().DomainApplicationBuilder[:]},
|
||||
).Times(1).Return(ðpb.DomainResponse{SignatureDomain: make([]byte, 32)}, errors.New(domainDataErr))
|
||||
|
||||
require.ErrorContains(t, domainDataErr, SubmitValidatorRegistration(ctx, m.validatorClient, m.nodeClient, m.signfunc, reg))
|
||||
}
|
||||
|
||||
func TestSubmitValidatorRegistration_CantSign(t *testing.T) {
|
||||
_, m, validatorKey, finish := setup(t)
|
||||
defer finish()
|
||||
@@ -88,11 +58,6 @@ func TestSubmitValidatorRegistration_CantSign(t *testing.T) {
|
||||
GetGenesis(gomock.Any(), &emptypb.Empty{}).
|
||||
Return(ðpb.Genesis{GenesisTime: genesisTime}, nil)
|
||||
|
||||
m.validatorClient.EXPECT().DomainData(
|
||||
gomock.Any(), // ctx
|
||||
ðpb.DomainRequest{Domain: params.BeaconConfig().DomainApplicationBuilder[:]},
|
||||
).Times(1).Return(ðpb.DomainResponse{SignatureDomain: make([]byte, 32)}, nil /*err*/)
|
||||
|
||||
m.validatorClient.EXPECT().
|
||||
SubmitValidatorRegistration(gomock.Any(), ðpb.SignedValidatorRegistrationV1{
|
||||
Message: reg,
|
||||
@@ -113,23 +78,10 @@ func Test_signValidatorRegistration(t *testing.T) {
|
||||
Timestamp: uint64(time.Now().Unix()),
|
||||
Pubkey: validatorKey.PublicKey().Marshal(),
|
||||
}
|
||||
m.validatorClient.EXPECT().DomainData(
|
||||
gomock.Any(), // ctx
|
||||
ðpb.DomainRequest{Domain: params.BeaconConfig().DomainApplicationBuilder[:]},
|
||||
).Times(1).Return(ðpb.DomainResponse{SignatureDomain: make([]byte, 32)}, nil /*err*/)
|
||||
_, err := signValidatorRegistration(
|
||||
ctx,
|
||||
1,
|
||||
m.validatorClient, m.signfunc, reg)
|
||||
require.NoError(t, err)
|
||||
|
||||
m.validatorClient.EXPECT().DomainData(
|
||||
gomock.Any(), // ctx
|
||||
ðpb.DomainRequest{Domain: params.BeaconConfig().DomainApplicationBuilder[:]},
|
||||
).Times(1).Return(nil, errors.New(domainDataErr) /*err*/)
|
||||
_, err = signValidatorRegistration(
|
||||
ctx,
|
||||
1,
|
||||
m.validatorClient, m.signfunc, reg)
|
||||
require.ErrorContains(t, domainDataErr, err)
|
||||
}
|
||||
|
||||
@@ -70,7 +70,7 @@ type ValidatorService struct {
|
||||
grpcHeaders []string
|
||||
graffiti []byte
|
||||
Web3SignerConfig *remote_web3signer.SetupConfig
|
||||
proposerSettings *validator_service_config.ProposerSettings
|
||||
ProposerSettings *validator_service_config.ProposerSettings
|
||||
}
|
||||
|
||||
// Config for the validator service.
|
||||
@@ -123,7 +123,7 @@ func NewValidatorService(ctx context.Context, cfg *Config) (*ValidatorService, e
|
||||
graffitiStruct: cfg.GraffitiStruct,
|
||||
logDutyCountDown: cfg.LogDutyCountDown,
|
||||
Web3SignerConfig: cfg.Web3SignerConfig,
|
||||
proposerSettings: cfg.ProposerSettings,
|
||||
ProposerSettings: cfg.ProposerSettings,
|
||||
}
|
||||
|
||||
dialOpts := ConstructDialOptions(
|
||||
@@ -206,7 +206,7 @@ func (v *ValidatorService) Start() {
|
||||
eipImportBlacklistedPublicKeys: slashablePublicKeys,
|
||||
logDutyCountDown: v.logDutyCountDown,
|
||||
Web3SignerConfig: v.Web3SignerConfig,
|
||||
ProposerSettings: v.proposerSettings,
|
||||
ProposerSettings: v.ProposerSettings,
|
||||
walletIntializedChannel: make(chan *wallet.Wallet, 1),
|
||||
}
|
||||
// To resolve a race condition at startup due to the interface
|
||||
|
||||
@@ -403,14 +403,6 @@ func TestWaitMultipleActivation_LogsActivationEpochOK(t *testing.T) {
|
||||
).Return(
|
||||
ðpb.Genesis{GenesisTime: timestamppb.Now()}, nil)
|
||||
|
||||
client.EXPECT().DomainData(
|
||||
gomock.Any(),
|
||||
gomock.Any(),
|
||||
).Return(
|
||||
ðpb.DomainResponse{
|
||||
SignatureDomain: make([]byte, 32),
|
||||
},
|
||||
nil)
|
||||
client.EXPECT().SubmitValidatorRegistration(
|
||||
gomock.Any(),
|
||||
gomock.Any(),
|
||||
@@ -474,14 +466,6 @@ func TestWaitActivation_NotAllValidatorsActivatedOK(t *testing.T) {
|
||||
).Return(
|
||||
ðpb.Genesis{GenesisTime: timestamppb.Now()}, nil)
|
||||
|
||||
client.EXPECT().DomainData(
|
||||
gomock.Any(),
|
||||
gomock.Any(),
|
||||
).Return(
|
||||
ðpb.DomainResponse{
|
||||
SignatureDomain: make([]byte, 32),
|
||||
},
|
||||
nil)
|
||||
client.EXPECT().SubmitValidatorRegistration(
|
||||
gomock.Any(),
|
||||
gomock.Any(),
|
||||
@@ -1588,15 +1572,6 @@ func TestValidator_PushProposerSettings(t *testing.T) {
|
||||
&emptypb.Empty{},
|
||||
).Times(2).Return(
|
||||
ðpb.Genesis{GenesisTime: timestamppb.Now()}, nil)
|
||||
|
||||
client.EXPECT().DomainData(
|
||||
gomock.Any(),
|
||||
gomock.Any(),
|
||||
).Times(2).Return(
|
||||
ðpb.DomainResponse{
|
||||
SignatureDomain: make([]byte, 32),
|
||||
},
|
||||
nil)
|
||||
client.EXPECT().SubmitValidatorRegistration(
|
||||
gomock.Any(),
|
||||
gomock.Any(),
|
||||
@@ -1662,14 +1637,6 @@ func TestValidator_PushProposerSettings(t *testing.T) {
|
||||
).Return(
|
||||
ðpb.Genesis{GenesisTime: timestamppb.Now()}, nil)
|
||||
|
||||
client.EXPECT().DomainData(
|
||||
gomock.Any(),
|
||||
gomock.Any(),
|
||||
).Return(
|
||||
ðpb.DomainResponse{
|
||||
SignatureDomain: make([]byte, 32),
|
||||
},
|
||||
nil)
|
||||
client.EXPECT().SubmitValidatorRegistration(
|
||||
gomock.Any(),
|
||||
gomock.Any(),
|
||||
@@ -1749,14 +1716,6 @@ func TestValidator_PushProposerSettings(t *testing.T) {
|
||||
).Return(
|
||||
ðpb.Genesis{GenesisTime: timestamppb.Now()}, nil)
|
||||
|
||||
client.EXPECT().DomainData(
|
||||
gomock.Any(),
|
||||
gomock.Any(),
|
||||
).Return(
|
||||
ðpb.DomainResponse{
|
||||
SignatureDomain: make([]byte, 32),
|
||||
},
|
||||
nil)
|
||||
client.EXPECT().SubmitValidatorRegistration(
|
||||
gomock.Any(),
|
||||
gomock.Any(),
|
||||
@@ -1826,14 +1785,6 @@ func TestValidator_PushProposerSettings(t *testing.T) {
|
||||
).Return(
|
||||
ðpb.Genesis{GenesisTime: timestamppb.Now()}, nil)
|
||||
|
||||
client.EXPECT().DomainData(
|
||||
gomock.Any(),
|
||||
gomock.Any(),
|
||||
).Return(
|
||||
ðpb.DomainResponse{
|
||||
SignatureDomain: make([]byte, 32),
|
||||
},
|
||||
nil)
|
||||
client.EXPECT().SubmitValidatorRegistration(
|
||||
gomock.Any(),
|
||||
gomock.Any(),
|
||||
@@ -1945,14 +1896,6 @@ func TestValidator_PushProposerSettings(t *testing.T) {
|
||||
).Return(
|
||||
ðpb.Genesis{GenesisTime: timestamppb.Now()}, nil)
|
||||
|
||||
client.EXPECT().DomainData(
|
||||
gomock.Any(),
|
||||
gomock.Any(),
|
||||
).Return(
|
||||
ðpb.DomainResponse{
|
||||
SignatureDomain: make([]byte, 32),
|
||||
},
|
||||
nil)
|
||||
client.EXPECT().SubmitValidatorRegistration(
|
||||
gomock.Any(),
|
||||
gomock.Any(),
|
||||
|
||||
@@ -114,14 +114,6 @@ func TestWaitActivation_StreamSetupFails_AttemptsToReconnect(t *testing.T) {
).Return(
&ethpb.Genesis{GenesisTime: timestamppb.Now()}, nil)

client.EXPECT().DomainData(
gomock.Any(),
gomock.Any(),
).Return(
&ethpb.DomainResponse{
SignatureDomain: make([]byte, 32),
},
nil)
client.EXPECT().SubmitValidatorRegistration(
gomock.Any(),
gomock.Any(),
@@ -181,14 +173,6 @@ func TestWaitForActivation_ReceiveErrorFromStream_AttemptsReconnection(t *testin
).Return(
&ethpb.Genesis{GenesisTime: timestamppb.Now()}, nil)

client.EXPECT().DomainData(
gomock.Any(),
gomock.Any(),
).Return(
&ethpb.DomainResponse{
SignatureDomain: make([]byte, 32),
},
nil)
client.EXPECT().SubmitValidatorRegistration(
gomock.Any(),
gomock.Any(),
@@ -244,14 +228,6 @@ func TestWaitActivation_LogsActivationEpochOK(t *testing.T) {
).Return(
&ethpb.Genesis{GenesisTime: timestamppb.Now()}, nil)

client.EXPECT().DomainData(
gomock.Any(),
gomock.Any(),
).Return(
&ethpb.DomainResponse{
SignatureDomain: make([]byte, 32),
},
nil)
client.EXPECT().SubmitValidatorRegistration(
gomock.Any(),
gomock.Any(),
@@ -312,14 +288,6 @@ func TestWaitForActivation_Exiting(t *testing.T) {
).Return(
&ethpb.Genesis{GenesisTime: timestamppb.Now()}, nil)

client.EXPECT().DomainData(
gomock.Any(),
gomock.Any(),
).Return(
&ethpb.DomainResponse{
SignatureDomain: make([]byte, 32),
},
nil)
client.EXPECT().SubmitValidatorRegistration(
gomock.Any(),
gomock.Any(),
@@ -386,14 +354,6 @@ func TestWaitForActivation_RefetchKeys(t *testing.T) {
).Return(
&ethpb.Genesis{GenesisTime: timestamppb.Now()}, nil)

client.EXPECT().DomainData(
gomock.Any(),
gomock.Any(),
).Return(
&ethpb.DomainResponse{
SignatureDomain: make([]byte, 32),
},
nil)
client.EXPECT().SubmitValidatorRegistration(
gomock.Any(),
gomock.Any(),
@@ -484,14 +444,6 @@ func TestWaitForActivation_AccountsChanged(t *testing.T) {
).Times(2).Return(
&ethpb.Genesis{GenesisTime: timestamppb.Now()}, nil)

client.EXPECT().DomainData(
gomock.Any(),
gomock.Any(),
).Times(2).Return(
&ethpb.DomainResponse{
SignatureDomain: make([]byte, 32),
},
nil)
client.EXPECT().SubmitValidatorRegistration(
gomock.Any(),
gomock.Any(),
@@ -592,14 +544,6 @@ func TestWaitForActivation_AccountsChanged(t *testing.T) {
).Times(2).Return(
&ethpb.Genesis{GenesisTime: timestamppb.Now()}, nil)

client.EXPECT().DomainData(
gomock.Any(),
gomock.Any(),
).Times(2).Return(
&ethpb.DomainResponse{
SignatureDomain: make([]byte, 32),
},
nil)
client.EXPECT().SubmitValidatorRegistration(
gomock.Any(),
gomock.Any(),
@@ -686,14 +630,6 @@ func TestWaitForActivation_RemoteKeymanager(t *testing.T) {
&emptypb.Empty{},
).Times(2).Return(
&ethpb.Genesis{GenesisTime: timestamppb.Now()}, nil)
client.EXPECT().DomainData(
gomock.Any(),
gomock.Any(),
).Times(2).Return(
&ethpb.DomainResponse{
SignatureDomain: make([]byte, 32),
},
nil)
client.EXPECT().SubmitValidatorRegistration(
gomock.Any(),
gomock.Any(),
@@ -789,14 +725,6 @@ func TestWaitForActivation_RemoteKeymanager(t *testing.T) {
).Times(2).Return(
&ethpb.Genesis{GenesisTime: timestamppb.Now()}, nil)

client.EXPECT().DomainData(
gomock.Any(),
gomock.Any(),
).Times(2).Return(
&ethpb.DomainResponse{
SignatureDomain: make([]byte, 32),
},
nil)
client.EXPECT().SubmitValidatorRegistration(
gomock.Any(),
gomock.Any(),

@@ -678,6 +678,7 @@ func (c *ValidatorClient) registerRPCGatewayService(cliCtx *cli.Context) error {
gwruntime.WithMarshalerOption(
"text/event-stream", &gwruntime.EventSourceJSONPb{},
),
gwruntime.WithForwardResponseOption(gateway.HttpResponseModifier),
)
muxHandler := func(apiMware *apimiddleware.ApiProxyMiddleware, h http.HandlerFunc, w http.ResponseWriter, req *http.Request) {
// The validator gateway handler requires this special logic as it serves two kinds of APIs, namely

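The WithForwardResponseOption(gateway.HttpResponseModifier) registration above is what lets individual gRPC handlers override the HTTP status code by setting an "x-http-code" metadata header, as SetFeeRecipientByPubkey and DeleteFeeRecipientByPubkey do further down in this diff. The modifier's implementation is not shown here; the snippet below is only a sketch of the standard grpc-gateway pattern such a function is assumed to follow, with an illustrative name.

package gateway

import (
	"context"
	"net/http"
	"strconv"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/protobuf/proto"
)

// httpResponseModifierSketch is an illustrative forward-response option: if a
// handler set an "x-http-code" metadata header, its numeric value replaces the
// default 200 status on the HTTP response.
func httpResponseModifierSketch(ctx context.Context, w http.ResponseWriter, _ proto.Message) error {
	md, ok := runtime.ServerMetadataFromContext(ctx)
	if !ok {
		return nil
	}
	vals := md.HeaderMD.Get("x-http-code")
	if len(vals) == 0 {
		return nil
	}
	code, err := strconv.Atoi(vals[0])
	if err != nil {
		return err
	}
	// Strip the internal header so it is not forwarded to the client, then
	// write the overridden status code.
	delete(md.HeaderMD, "x-http-code")
	w.Header().Del("Grpc-Metadata-X-Http-Code")
	w.WriteHeader(code)
	return nil
}
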
@@ -26,6 +26,8 @@ go_library(
"//cmd:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//config/validator/service:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/rand:go_default_library",
"//encoding/bytesutil:go_default_library",
@@ -47,6 +49,8 @@ go_library(
"//validator/keymanager/local:go_default_library",
"//validator/slashing-protection-history:go_default_library",
"//validator/slashing-protection-history/format:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_fsnotify_fsnotify//:go_default_library",
"@com_github_golang_jwt_jwt_v4//:go_default_library",
"@com_github_grpc_ecosystem_go_grpc_middleware//:go_default_library",
@@ -90,6 +94,8 @@ go_test(
"//cmd/validator/flags:go_default_library",
"//config/features:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//config/validator/service:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/rand:go_default_library",
"//encoding/bytesutil:go_default_library",
@@ -112,10 +118,12 @@ go_test(
"//validator/keymanager/remote-web3signer:go_default_library",
"//validator/slashing-protection-history/format:go_default_library",
"//validator/testing:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_golang_jwt_jwt_v4//:go_default_library",
"@com_github_golang_mock//gomock:go_default_library",
"@com_github_google_uuid//:go_default_library",
"@com_github_grpc_ecosystem_grpc_gateway_v2//runtime:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_tyler_smith_go_bip39//:go_default_library",
"@com_github_wealdtech_go_eth2_wallet_encryptor_keystorev4//:go_default_library",

@@ -18,6 +18,7 @@ func (*ValidatorEndpointFactory) Paths() []string {
return []string{
"/eth/v1/keystores",
"/eth/v1/remotekeys",
"/eth/v1/validator/{pubkey}/feerecipient",
}
}

@@ -37,6 +38,9 @@ func (*ValidatorEndpointFactory) Create(path string) (*apimiddleware.Endpoint, e
endpoint.PostResponse = &importRemoteKeysResponseJson{}
endpoint.DeleteRequest = &deleteRemoteKeysRequestJson{}
endpoint.DeleteResponse = &deleteRemoteKeysResponseJson{}
case "/eth/v1/validator/{pubkey}/feerecipient":
endpoint.GetResponse = &getFeeRecipientByPubkeyResponseJson{}
endpoint.PostRequest = &setFeeRecipientByPubkeyRequestJson{}
default:
return nil, errors.New("invalid path")
}

@@ -66,3 +66,16 @@ type deleteRemoteKeysRequestJson struct {
type deleteRemoteKeysResponseJson struct {
Statuses []*statusJson `json:"data"`
}

type feeRecipientJson struct {
Pubkey string `json:"pubkey" hex:"true"`
Ethaddress string `json:"ethaddress" address:"true"`
}

type getFeeRecipientByPubkeyResponseJson struct {
Data *feeRecipientJson `json:"data"`
}

type setFeeRecipientByPubkeyRequestJson struct {
Ethaddress string `json:"ethaddress" hex:"true"`
}

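The json tags on feeRecipientJson, getFeeRecipientByPubkeyResponseJson, and setFeeRecipientByPubkeyRequestJson above define the wire format the middleware exposes for /eth/v1/validator/{pubkey}/feerecipient. As a rough illustration of those bodies, the self-contained sketch below marshals mirror structs using the pubkey and address that appear in this diff's tests; the mirror types exist only for the example and are not part of the change.

package main

import (
	"encoding/json"
	"fmt"
)

// Mirrors of the middleware structs above, used only to print the JSON shapes.
type exampleFeeRecipient struct {
	Pubkey     string `json:"pubkey"`
	Ethaddress string `json:"ethaddress"`
}

type exampleGetResponse struct {
	Data *exampleFeeRecipient `json:"data"`
}

type exampleSetRequest struct {
	Ethaddress string `json:"ethaddress"`
}

func main() {
	resp := exampleGetResponse{Data: &exampleFeeRecipient{
		Pubkey:     "0xaf2e7ba294e03438ea819bd4033c6c1bf6b04320ee2075b77273c08d02f8a61bcc303c2c06bd3713cb442072ae591493",
		Ethaddress: "0x046Fb65722E7b2455012BFEBf6177F1D2e9738D9",
	}}
	req := exampleSetRequest{Ethaddress: "0x046Fb65722E7b2455012BFEBf6177F1D2e9738D9"}

	respJSON, _ := json.MarshalIndent(resp, "", "  ")
	reqJSON, _ := json.MarshalIndent(req, "", "  ")
	fmt.Println("GET response body:", string(respJSON))
	fmt.Println("POST request body:", string(reqJSON))
}
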
@@ -6,15 +6,21 @@ import (
"encoding/json"
"fmt"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/golang/protobuf/ptypes/empty"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
validatorServiceConfig "github.com/prysmaticlabs/prysm/config/validator/service"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpbservice "github.com/prysmaticlabs/prysm/proto/eth/service"
"github.com/prysmaticlabs/prysm/validator/keymanager"
"github.com/prysmaticlabs/prysm/validator/keymanager/derived"
slashingprotection "github.com/prysmaticlabs/prysm/validator/slashing-protection-history"
"github.com/prysmaticlabs/prysm/validator/slashing-protection-history/format"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
)

@@ -382,3 +388,121 @@ func groupDeleteRemoteKeysErrors(req *ethpbservice.DeleteRemoteKeysRequest, erro
}
return statuses
}

// ListFeeRecipientByPubkey returns the public key to eth address mapping object to the end user.
func (s *Server) ListFeeRecipientByPubkey(ctx context.Context, req *ethpbservice.PubkeyRequest) (*ethpbservice.GetFeeRecipientByPubkeyResponse, error) {
if s.validatorService == nil {
return nil, status.Error(codes.FailedPrecondition, "Validator service not ready")
}
validatorKey := req.Pubkey
if err := validatePublicKey(validatorKey); err != nil {
return nil, status.Error(codes.FailedPrecondition, err.Error())
}
defaultFeeRecipient := params.BeaconConfig().DefaultFeeRecipient.Bytes()
if s.validatorService.ProposerSettings == nil {
return &ethpbservice.GetFeeRecipientByPubkeyResponse{
Data: &ethpbservice.GetFeeRecipientByPubkeyResponse_FeeRecipient{
Pubkey: validatorKey,
Ethaddress: defaultFeeRecipient,
},
}, nil
}
if s.validatorService.ProposerSettings.ProposeConfig != nil {
proposerOption, found := s.validatorService.ProposerSettings.ProposeConfig[bytesutil.ToBytes48(validatorKey)]
if found {
return &ethpbservice.GetFeeRecipientByPubkeyResponse{
Data: &ethpbservice.GetFeeRecipientByPubkeyResponse_FeeRecipient{
Pubkey: validatorKey,
Ethaddress: proposerOption.FeeRecipient.Bytes(),
},
}, nil
}
}
if s.validatorService.ProposerSettings.DefaultConfig != nil {
defaultFeeRecipient = s.validatorService.ProposerSettings.DefaultConfig.FeeRecipient.Bytes()
}
return &ethpbservice.GetFeeRecipientByPubkeyResponse{
Data: &ethpbservice.GetFeeRecipientByPubkeyResponse_FeeRecipient{
Pubkey: validatorKey,
Ethaddress: defaultFeeRecipient,
},
}, nil
}

// SetFeeRecipientByPubkey updates the eth address mapped to the public key.
func (s *Server) SetFeeRecipientByPubkey(ctx context.Context, req *ethpbservice.SetFeeRecipientByPubkeyRequest) (*empty.Empty, error) {
if s.validatorService == nil {
return nil, status.Error(codes.FailedPrecondition, "Validator service not ready")
}
validatorKey := req.Pubkey
if err := validatePublicKey(validatorKey); err != nil {
return nil, status.Error(codes.FailedPrecondition, err.Error())
}
defaultOption := validatorServiceConfig.DefaultProposerOption()
encoded := hexutil.Encode(req.Ethaddress)
if !common.IsHexAddress(encoded) {
return nil, status.Error(
codes.InvalidArgument, "Fee recipient is not a valid Ethereum address")
}
pOption := validatorServiceConfig.DefaultProposerOption()
pOption.FeeRecipient = common.BytesToAddress(req.Ethaddress)
switch {
case s.validatorService.ProposerSettings == nil:
s.validatorService.ProposerSettings = &validatorServiceConfig.ProposerSettings{
ProposeConfig: map[[fieldparams.BLSPubkeyLength]byte]*validatorServiceConfig.ProposerOption{
bytesutil.ToBytes48(validatorKey): &pOption,
},
DefaultConfig: &defaultOption,
}
case s.validatorService.ProposerSettings.ProposeConfig == nil:
s.validatorService.ProposerSettings.ProposeConfig = map[[fieldparams.BLSPubkeyLength]byte]*validatorServiceConfig.ProposerOption{
bytesutil.ToBytes48(validatorKey): &pOption,
}
default:
proposerOption, found := s.validatorService.ProposerSettings.ProposeConfig[bytesutil.ToBytes48(validatorKey)]
if found {
proposerOption.FeeRecipient = common.BytesToAddress(req.Ethaddress)
} else {
s.validatorService.ProposerSettings.ProposeConfig[bytesutil.ToBytes48(validatorKey)] = &pOption
}
}
// override the 200 success with 202 according to the specs
if err := grpc.SetHeader(ctx, metadata.Pairs("x-http-code", "202")); err != nil {
return &empty.Empty{}, status.Errorf(codes.Internal, "Could not set custom success code header: %v", err)
}
return &empty.Empty{}, nil
}

// DeleteFeeRecipientByPubkey updates the eth address mapped to the public key to the default fee recipient listed
func (s *Server) DeleteFeeRecipientByPubkey(ctx context.Context, req *ethpbservice.PubkeyRequest) (*empty.Empty, error) {
if s.validatorService == nil {
return nil, status.Error(codes.FailedPrecondition, "Validator service not ready")
}
validatorKey := req.Pubkey
if err := validatePublicKey(validatorKey); err != nil {
return nil, status.Error(codes.FailedPrecondition, err.Error())
}
defaultFeeRecipient := params.BeaconConfig().DefaultFeeRecipient
if s.validatorService.ProposerSettings != nil && s.validatorService.ProposerSettings.DefaultConfig != nil {
defaultFeeRecipient = s.validatorService.ProposerSettings.DefaultConfig.FeeRecipient
}
if s.validatorService.ProposerSettings != nil && s.validatorService.ProposerSettings.ProposeConfig != nil {
proposerOption, found := s.validatorService.ProposerSettings.ProposeConfig[bytesutil.ToBytes48(validatorKey)]
if found {
proposerOption.FeeRecipient = defaultFeeRecipient
}
}
// override the 200 success with 204 according to the specs
if err := grpc.SetHeader(ctx, metadata.Pairs("x-http-code", "204")); err != nil {
return &empty.Empty{}, status.Errorf(codes.Internal, "Could not set custom success code header: %v", err)
}
return &empty.Empty{}, nil
}

func validatePublicKey(pubkey []byte) error {
if len(pubkey) != fieldparams.BLSPubkeyLength {
return status.Errorf(
codes.InvalidArgument, "Provided public key in path is not byte length %d and not a valid bls public key", fieldparams.BLSPubkeyLength)
}
return nil
}

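For reference, ListFeeRecipientByPubkey above resolves an address in this order: a per-pubkey entry in ProposerSettings.ProposeConfig, then ProposerSettings.DefaultConfig, then the chain's DefaultFeeRecipient. The condensed sketch below restates that precedence, assuming the same imports as keymanager.go above; the helper is illustrative and not part of the diff.

// feeRecipientFor resolves the fee recipient for a validator public key using
// the same precedence as ListFeeRecipientByPubkey: explicit per-key setting,
// then the default proposer config, then the chain-wide default.
func feeRecipientFor(settings *validatorServiceConfig.ProposerSettings, pubkey [fieldparams.BLSPubkeyLength]byte) common.Address {
	chainDefault := params.BeaconConfig().DefaultFeeRecipient
	if settings == nil {
		return chainDefault
	}
	if settings.ProposeConfig != nil {
		if opt, ok := settings.ProposeConfig[pubkey]; ok {
			return opt.FeeRecipient
		}
	}
	if settings.DefaultConfig != nil {
		return settings.DefaultConfig.FeeRecipient
	}
	return chainDefault
}
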
@@ -7,14 +7,19 @@ import (
"fmt"
"testing"

"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/golang/protobuf/ptypes/empty"
"github.com/google/uuid"
"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
validator_service_config "github.com/prysmaticlabs/prysm/config/validator/service"
"github.com/prysmaticlabs/prysm/crypto/bls"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
ethpbservice "github.com/prysmaticlabs/prysm/proto/eth/service"
validatorpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/validator-client"
"github.com/prysmaticlabs/prysm/testing/assert"
"github.com/prysmaticlabs/prysm/testing/require"
"github.com/prysmaticlabs/prysm/validator/accounts"
"github.com/prysmaticlabs/prysm/validator/accounts/iface"
@@ -28,6 +33,7 @@ import (
"github.com/prysmaticlabs/prysm/validator/slashing-protection-history/format"
mocks "github.com/prysmaticlabs/prysm/validator/testing"
keystorev4 "github.com/wealdtech/go-eth2-wallet-encryptor-keystorev4"
"google.golang.org/grpc"
)

func TestServer_ListKeystores(t *testing.T) {
@@ -680,3 +686,148 @@ func TestServer_DeleteRemoteKeys(t *testing.T) {
require.Equal(t, 0, len(expectedKeys))
})
}

func TestServer_ListFeeRecipientByPubkey(t *testing.T) {
ctx := context.Background()
byteval, err := hexutil.Decode("0xaf2e7ba294e03438ea819bd4033c6c1bf6b04320ee2075b77273c08d02f8a61bcc303c2c06bd3713cb442072ae591493")
require.NoError(t, err)

type want struct {
EthAddress string
}

tests := []struct {
name string
args *validator_service_config.ProposerSettings
want *want
wantErr bool
}{
{
name: "Happy Path Test",
args: &validator_service_config.ProposerSettings{
ProposeConfig: map[[48]byte]*validator_service_config.ProposerOption{
bytesutil.ToBytes48(byteval): {
FeeRecipient: common.HexToAddress("0x046Fb65722E7b2455012BFEBf6177F1D2e9738D9"),
},
},
DefaultConfig: &validator_service_config.ProposerOption{
FeeRecipient: common.HexToAddress("0x046Fb65722E7b2455012BFEBf6177F1D2e9738D9"),
},
},
want: &want{
EthAddress: "0x046Fb65722E7b2455012BFEBf6177F1D2e9738D9",
},
wantErr: false,
},
{
name: "empty settings",
args: nil,
want: &want{
EthAddress: params.BeaconConfig().DefaultFeeRecipient.Hex(),
},
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
vs, err := client.NewValidatorService(ctx, &client.Config{
Validator: &mock.MockValidator{},
ProposerSettings: tt.args,
})
require.NoError(t, err)
s := &Server{
validatorService: vs,
}
got, err := s.ListFeeRecipientByPubkey(ctx, &ethpbservice.PubkeyRequest{Pubkey: byteval})
require.NoError(t, err)
assert.Equal(t, tt.want.EthAddress, common.BytesToAddress(got.Data.Ethaddress).Hex())
})
}
}
func TestServer_SetFeeRecipientByPubkey(t *testing.T) {
ctx := grpc.NewContextWithServerTransportStream(context.Background(), &runtime.ServerTransportStream{})
byteval, err := hexutil.Decode("0xaf2e7ba294e03438ea819bd4033c6c1bf6b04320ee2075b77273c08d02f8a61bcc303c2c06bd3713cb442072ae591493")
require.NoError(t, err)
type want struct {
EthAddress string
}
tests := []struct {
name string
args string
proposerSettings *validator_service_config.ProposerSettings
want *want
wantErr bool
}{
{
name: "Happy Path Test",
args: "0x046Fb65722E7b2455012BFEBf6177F1D2e9738D9",
want: &want{
EthAddress: "0x046Fb65722E7b2455012BFEBf6177F1D2e9738D9",
},
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
vs, err := client.NewValidatorService(ctx, &client.Config{
Validator: &mock.MockValidator{},
ProposerSettings: tt.proposerSettings,
})
require.NoError(t, err)
s := &Server{
validatorService: vs,
}
_, err = s.SetFeeRecipientByPubkey(ctx, &ethpbservice.SetFeeRecipientByPubkeyRequest{Pubkey: byteval, Ethaddress: common.HexToAddress(tt.args).Bytes()})
require.NoError(t, err)
assert.Equal(t, tt.want.EthAddress, s.validatorService.ProposerSettings.ProposeConfig[bytesutil.ToBytes48(byteval)].FeeRecipient.Hex())
})
}
}

func TestServer_DeleteFeeRecipientByPubkey(t *testing.T) {
ctx := grpc.NewContextWithServerTransportStream(context.Background(), &runtime.ServerTransportStream{})
byteval, err := hexutil.Decode("0xaf2e7ba294e03438ea819bd4033c6c1bf6b04320ee2075b77273c08d02f8a61bcc303c2c06bd3713cb442072ae591493")
require.NoError(t, err)
type want struct {
EthAddress string
}
tests := []struct {
name string
proposerSettings *validator_service_config.ProposerSettings
want *want
wantErr bool
}{
{
name: "Happy Path Test",
proposerSettings: &validator_service_config.ProposerSettings{
ProposeConfig: map[[48]byte]*validator_service_config.ProposerOption{
bytesutil.ToBytes48(byteval): {
FeeRecipient: common.HexToAddress("0x055Fb65722E7b2455012BFEBf6177F1D2e9738D5"),
},
},
DefaultConfig: &validator_service_config.ProposerOption{
FeeRecipient: common.HexToAddress("0x046Fb65722E7b2455012BFEBf6177F1D2e9738D9"),
},
},
want: &want{
EthAddress: common.HexToAddress("0x046Fb65722E7b2455012BFEBf6177F1D2e9738D9").Hex(),
},
wantErr: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
vs, err := client.NewValidatorService(ctx, &client.Config{
Validator: &mock.MockValidator{},
ProposerSettings: tt.proposerSettings,
})
require.NoError(t, err)
s := &Server{
validatorService: vs,
}
_, err = s.DeleteFeeRecipientByPubkey(ctx, &ethpbservice.PubkeyRequest{Pubkey: byteval})
require.NoError(t, err)
assert.Equal(t, tt.want.EthAddress, s.validatorService.ProposerSettings.ProposeConfig[bytesutil.ToBytes48(byteval)].FeeRecipient.Hex())
})
}
}
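Once the validator's gateway is running, the endpoint wired up in this diff can be exercised over plain HTTP. The client below is an illustrative sketch, not part of the diff: the listen address (localhost:7500 is assumed here as the gateway default) and the bearer-token handling are assumptions and may differ on a real node.

package main

import (
	"fmt"
	"io"
	"net/http"
)

func main() {
	// Assumptions: the validator gateway listens on localhost:7500 and the
	// keymanager API expects a bearer token; adjust both for a real setup.
	const (
		base   = "http://localhost:7500"
		pubkey = "0xaf2e7ba294e03438ea819bd4033c6c1bf6b04320ee2075b77273c08d02f8a61bcc303c2c06bd3713cb442072ae591493"
		token  = "<auth token>"
	)

	req, err := http.NewRequest(http.MethodGet, base+"/eth/v1/validator/"+pubkey+"/feerecipient", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("Authorization", "Bearer "+token)

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	// Expected: 200 with {"data":{"pubkey":"...","ethaddress":"..."}} per the
	// middleware structs; POST returns 202 and DELETE 204 via the x-http-code
	// override shown in keymanager.go.
	fmt.Println(resp.Status)
	fmt.Println(string(body))
}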
|