Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-09 21:38:05 -05:00)

Compare commits: kiln-debug...debug-e2e (63 commits)
Commits:
9c5f519b11, 3bb6bc6dfd, daaa806342, 19ed30a74c, cf438e90f7, c5840a86c1,
760fe3cc2a, 276b7e96cc, e39a006a7b, e290556111, 91a6928efb, 8d5fd3da54,
8b6ab3e816, 51598ed0d5, 2a9129ee85, aac1f4895c, 86e36fe3a2, ece54828d4,
afe6c5e6cd, 08aff8f60f, 6903d52dde, ac8d27bcf1, 8d6afb3afd, d51f716675,
0411b7eceb, 8dfc80187d, 3833f78803, 83a83279d4, bdab34fd01, de0143e036,
2a7a09b112, 984575ed57, 927e338f9e, f44c99d92a, 7d669f23ab, 9b64c33bd1,
defa602e50, 67c8776f3c, 896d186e3b, edb98cf499, ce15823f8d, 7cdc741b2f,
5704fb34be, 3e2a037d42, 177f9ccab0, 649dee532f, 4b3364ac6b, 0df8d7f0c0,
ade7d705ec, c68894b77d, fe98d69c0a, 7516bc0316, be6f3892e8, 588605ceeb,
59b9519284, d4038fb752, 56ab5872bb, eb6d68a4b1, 7920528ede, 1af3c07ec5,
7279349ae2, 2d60d04b57, ef8bc97d3e
.github/actions/gomodtidy/entrypoint.sh (vendored, 4 changed lines)

@@ -1,8 +1,8 @@
#!/bin/sh -l
set -e
export PATH=$PATH:/usr/local/go/bin
export PATH="$PATH:/usr/local/go/bin"

cd $GITHUB_WORKSPACE
cd "$GITHUB_WORKSPACE"

cp go.mod go.mod.orig
cp go.sum go.sum.orig
.github/workflows/dappnode-release-trigger.yml (vendored, deleted, 41 lines)

@@ -1,41 +0,0 @@
name: Update DAppNodePackages

on:
  push:
    tags:
      - '*'

jobs:
  dappnode-update-beacon-chain:
    name: Trigger a beacon-chain release
    runs-on: ubuntu-latest
    steps:
      - name: Get latest tag
        id: get_tag
        run: echo ::set-output name=TAG::${GITHUB_REF/refs\/tags\//}
      - name: Send dispatch event to DAppNodePackage-prysm-beacon-chain
        env:
          DISPATCH_REPO: dappnode/DAppNodePackage-prysm-beacon-chain
        run: |
          curl -v -X POST -u "${{ secrets.PAT_GITHUB }}" \
          -H "Accept: application/vnd.github.everest-preview+json" \
          -H "Content-Type: application/json" \
          --data '{"event_type":"new_release", "client_payload": { "tag":"${{ steps.get_tag.outputs.TAG }}"}}' \
          https://api.github.com/repos/$DISPATCH_REPO/dispatches

  dappnode-update-validator:
    name: Trigger a validator release
    runs-on: ubuntu-latest
    steps:
      - name: Get latest tag
        id: get_tag
        run: echo ::set-output name=TAG::${GITHUB_REF/refs\/tags\//}
      - name: Send dispatch event to DAppNodePackage validator repository
        env:
          DISPATCH_REPO: dappnode/DAppNodePackage-prysm-validator
        run: |
          curl -v -X POST -u "${{ secrets.PAT_GITHUB }}" \
          -H "Accept: application/vnd.github.everest-preview+json" \
          -H "Content-Type: application/json" \
          --data '{"event_type":"new_release", "client_payload": { "tag":"${{ steps.get_tag.outputs.TAG }}"}}' \
          https://api.github.com/repos/$DISPATCH_REPO/dispatches
.github/workflows/go.yml (vendored, 45 changed lines)

@@ -7,13 +7,12 @@ on:
    branches: [ '*' ]

jobs:

  check:
    name: Check
  formatting:
    name: Formatting
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v1
        uses: actions/checkout@v2

      - name: Go mod tidy checker
        id: gomodtidy

@@ -31,15 +30,43 @@ jobs:
        with:
          goimports-path: ./

      - name: Gosec security scanner
        uses: securego/gosec@master
  gosec:
    name: Gosec scan
    runs-on: ubuntu-latest
    env:
      GO111MODULE: on
    steps:
      - name: Checkout
        uses: actions/checkout@v2
      - name: Set up Go 1.17
        uses: actions/setup-go@v3
        with:
          args: '-exclude=G307 -exclude-dir=crypto/bls/herumi ./...'
          go-version: 1.17
      - name: Run Gosec Security Scanner
        run: | # https://github.com/securego/gosec/issues/469
          export PATH=$PATH:$(go env GOPATH)/bin
          go install github.com/securego/gosec/v2/cmd/gosec@latest
          gosec -exclude=G307 -exclude-dir=crypto/bls/herumi ./...

  lint:
    name: Lint
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v2

      - name: Set up Go 1.17
        uses: actions/setup-go@v3
        with:
          go-version: 1.17
        id: go

      - name: Golangci-lint
        uses: golangci/golangci-lint-action@v2
        with:
          args: --print-issued-lines --sort-results --no-config --timeout=10m --disable-all -E deadcode -E errcheck -E gosimple --skip-files=validator/web/site_data.go --skip-dirs=proto
          args: --print-issued-lines --sort-results --no-config --timeout=10m --disable-all -E deadcode -E errcheck -E gosimple --skip-files=validator/web/site_data.go --skip-dirs=proto --go=1.17
          version: v1.45.2
          skip-go-installation: true

  build:
    name: Build

@@ -48,7 +75,7 @@
      - name: Set up Go 1.x
        uses: actions/setup-go@v2
        with:
          go-version: ^1.14
          go-version: 1.17
        id: go

      - name: Check out code into the Go module directory
.github/workflows/horusec.yaml (vendored, new file, 22 added lines)

@@ -0,0 +1,22 @@
name: Horusec Security Scan

on:
  schedule:
    # Runs cron at 16.00 UTC on
    - cron: '0 0 * * SUN'

jobs:
  Horusec_Scan:
    name: horusec-Scan
    runs-on: ubuntu-latest
    if: github.ref == 'refs/heads/develop'
    steps:
      - name: Check out code
        uses: actions/checkout@v2
        with: # Required when commit authors is enabled
          fetch-depth: 0

      - name: Running Security Scan
        run: |
          curl -fsSL https://raw.githubusercontent.com/ZupIT/horusec/main/deployments/scripts/install.sh | bash -s latest
          horusec start -t="10000" -p="./" -e="true" -i="**/crypto/bls/herumi/**, **/**/*_test.go, **/third_party/afl/**, **/crypto/keystore/key.go"
@@ -40,15 +40,15 @@ func (od *OriginData) CheckpointString() string {
// SaveBlock saves the downloaded block to a unique file in the given path.
// For readability and collision avoidance, the file name includes: type, config name, slot and root
func (od *OriginData) SaveBlock(dir string) (string, error) {
    blockPath := path.Join(dir, fname("state", od.cf, od.st.Slot(), od.wsd.BlockRoot))
    return blockPath, file.WriteFile(blockPath, od.sb)
    blockPath := path.Join(dir, fname("block", od.cf, od.b.Block().Slot(), od.wsd.BlockRoot))
    return blockPath, file.WriteFile(blockPath, od.BlockBytes())
}

// SaveState saves the downloaded state to a unique file in the given path.
// For readability and collision avoidance, the file name includes: type, config name, slot and root
func (od *OriginData) SaveState(dir string) (string, error) {
    statePath := path.Join(dir, fname("state", od.cf, od.st.Slot(), od.wsd.StateRoot))
    return statePath, file.WriteFile(statePath, od.sb)
    return statePath, file.WriteFile(statePath, od.StateBytes())
}

// StateBytes returns the ssz-encoded bytes of the downloaded BeaconState value.
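The fix above makes SaveBlock name its file after the block ("block" prefix, the block's slot) and write the block bytes instead of the state bytes. Below is a minimal, self-contained sketch of the naming convention the doc comment describes (type, config name, slot, root); the real fname helper's signature and format string are not shown in this diff, so the details here are assumptions.

```go
package main

import "fmt"

// fnameSketch is a hypothetical stand-in for the fname helper referenced in
// the diff: it combines the artifact type, config name, slot, and root so that
// a downloaded block and its state never collide on disk.
func fnameSketch(kind, configName string, slot uint64, root [32]byte) string {
	return fmt.Sprintf("%s_%s_%d-%#x.ssz", kind, configName, slot, root[:4])
}

func main() {
	var root [32]byte
	root[0], root[1] = 0xab, 0xcd
	// After the fix, block and state land in distinct, self-describing files.
	fmt.Println(fnameSketch("block", "mainnet", 4096, root))
	fmt.Println(fnameSketch("state", "mainnet", 4096, root))
}
```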
@@ -132,6 +132,15 @@ func validHostname(h string) (string, error) {
    return fmt.Sprintf("%s:%s", host, port), nil
}

// NodeURL returns a human-readable string representation of the beacon node base url.
func (c *Client) NodeURL() string {
    u := &url.URL{
        Scheme: c.scheme,
        Host:   c.host,
    }
    return u.String()
}

func (c *Client) urlForPath(methodPath string) *url.URL {
    u := &url.URL{
        Scheme: c.scheme,
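The added NodeURL method simply composes the client's scheme and host with the standard library. A runnable sketch of the same idea, using a stand-in client type with placeholder field values:

```go
package main

import (
	"fmt"
	"net/url"
)

// client mirrors the unexported scheme/host fields referenced in the diff.
type client struct {
	scheme string
	host   string
}

// NodeURL builds the base URL exactly as the new method in the diff does.
func (c *client) NodeURL() string {
	u := &url.URL{Scheme: c.scheme, Host: c.host}
	return u.String()
}

func main() {
	c := &client{scheme: "http", host: "localhost:3500"} // example values
	fmt.Println(c.NodeURL())                              // http://localhost:3500
}
```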
@@ -48,15 +48,16 @@ go_library(
        "//beacon-chain/core/transition:go_default_library",
        "//beacon-chain/db:go_default_library",
        "//beacon-chain/db/filters:go_default_library",
        "//beacon-chain/db/kv:go_default_library",
        "//beacon-chain/forkchoice:go_default_library",
        "//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
        "//beacon-chain/forkchoice/protoarray:go_default_library",
        "//beacon-chain/forkchoice/types:go_default_library",
        "//beacon-chain/operations/attestations:go_default_library",
        "//beacon-chain/operations/slashings:go_default_library",
        "//beacon-chain/operations/voluntaryexits:go_default_library",
        "//beacon-chain/p2p:go_default_library",
        "//beacon-chain/powchain:go_default_library",
        "//beacon-chain/powchain/engine-api-client/v1:go_default_library",
        "//beacon-chain/state:go_default_library",
        "//beacon-chain/state/stategen:go_default_library",
        "//cmd/beacon-chain/flags:go_default_library",

@@ -106,7 +107,6 @@ go_test(
        "init_test.go",
        "log_test.go",
        "metrics_test.go",
        "mock_engine_test.go",
        "mock_test.go",
        "optimistic_sync_test.go",
        "pow_block_test.go",

@@ -130,6 +130,7 @@ go_test(
        "//beacon-chain/db/testing:go_default_library",
        "//beacon-chain/p2p:go_default_library",
        "//beacon-chain/powchain:go_default_library",
        "//beacon-chain/powchain/testing:go_default_library",
        "//beacon-chain/state/stateutil:go_default_library",
        "//beacon-chain/state/v1:go_default_library",
        "//config/fieldparams:go_default_library",

@@ -183,6 +184,7 @@ go_test(
        "//beacon-chain/db/testing:go_default_library",
        "//beacon-chain/p2p:go_default_library",
        "//beacon-chain/powchain:go_default_library",
        "//beacon-chain/powchain/testing:go_default_library",
        "//config/params:go_default_library",
        "//encoding/bytesutil:go_default_library",
        "//proto/prysm/v1alpha1:go_default_library",
@@ -328,10 +328,10 @@ func (s *Service) IsOptimistic(ctx context.Context) (bool, error) {
    return s.IsOptimisticForRoot(ctx, s.head.root)
}

// IsOptimisticForRoot takes the root and slot as aguments instead of the current head
// IsOptimisticForRoot takes the root and slot as arguments instead of the current head
// and returns true if it is optimistic.
func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool, error) {
    optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(ctx, root)
    optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(root)
    if err == nil {
        return optimistic, nil
    }

@@ -358,11 +358,26 @@ func (s *Service) IsOptimisticForRoot(ctx context.Context, root [32]byte) (bool,
        return false, nil
    }

    lastValidated, err := s.cfg.BeaconDB.StateSummary(ctx, bytesutil.ToBytes32(validatedCheckpoint.Root))
    // checkpoint root could be zeros before the first finalized epoch. Use genesis root if the case.
    lastValidated, err := s.cfg.BeaconDB.StateSummary(ctx, s.ensureRootNotZeros(bytesutil.ToBytes32(validatedCheckpoint.Root)))
    if err != nil {
        return false, err
    }
    return ss.Slot > lastValidated.Slot, nil
    if lastValidated == nil {
        return false, errInvalidNilSummary
    }

    if ss.Slot > lastValidated.Slot {
        return true, nil
    }

    isCanonical, err := s.IsCanonical(ctx, root)
    if err != nil {
        return false, err
    }

    // historical non-canonical blocks here are returned as optimistic for safety.
    return !isCanonical, nil
}

// SetGenesisTime sets the genesis time of beacon chain.
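The new DB fallback in IsOptimisticForRoot substitutes the genesis root when the last-validated checkpoint root is still all zeros, rejects a nil state summary outright, compares slots against the last validated summary, and finally reports historical non-canonical blocks as optimistic. A simplified, self-contained sketch of that decision order; every type and helper here is a stand-in, not Prysm's real API:

```go
package main

import (
	"errors"
	"fmt"
)

// summary is a stand-in for a state summary record (only the slot matters here).
type summary struct{ Slot uint64 }

var errNilSummary = errors.New("nil summary returned from the DB")

// isOptimisticFallback mirrors the post-forkchoice branch of the diff.
func isOptimisticFallback(blockSummary, lastValidated *summary, isCanonical bool) (bool, error) {
	if lastValidated == nil {
		return false, errNilSummary
	}
	if blockSummary.Slot > lastValidated.Slot {
		// Newer than the last validated checkpoint: treat as optimistic.
		return true, nil
	}
	// Historical non-canonical blocks are returned as optimistic for safety.
	return !isCanonical, nil
}

func main() {
	opt, err := isOptimisticFallback(&summary{Slot: 11}, &summary{Slot: 10}, true)
	fmt.Println(opt, err) // true <nil>
}
```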
@@ -9,6 +9,7 @@ import (
    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
    "github.com/prysmaticlabs/prysm/testing/require"
    "github.com/prysmaticlabs/prysm/testing/util"
)

func TestHeadSlot_DataRace(t *testing.T) {

@@ -16,10 +17,13 @@ func TestHeadSlot_DataRace(t *testing.T) {
    s := &Service{
        cfg: &config{BeaconDB: beaconDB},
    }
    b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
    require.NoError(t, err)
    st, _ := util.DeterministicGenesisState(t, 1)
    wait := make(chan struct{})
    go func() {
        defer close(wait)
        require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
        require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
    }()
    s.HeadSlot()
    <-wait

@@ -31,12 +35,16 @@ func TestHeadRoot_DataRace(t *testing.T) {
        cfg:  &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
        head: &head{root: [32]byte{'A'}},
    }
    b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
    require.NoError(t, err)
    wait := make(chan struct{})
    st, _ := util.DeterministicGenesisState(t, 1)
    go func() {
        defer close(wait)
        require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
        require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))

    }()
    _, err := s.HeadRoot(context.Background())
    _, err = s.HeadRoot(context.Background())
    require.NoError(t, err)
    <-wait
}

@@ -49,10 +57,14 @@ func TestHeadBlock_DataRace(t *testing.T) {
        cfg:  &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
        head: &head{block: wsb},
    }
    b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
    require.NoError(t, err)
    wait := make(chan struct{})
    st, _ := util.DeterministicGenesisState(t, 1)
    go func() {
        defer close(wait)
        require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
        require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))

    }()
    _, err = s.HeadBlock(context.Background())
    require.NoError(t, err)

@@ -64,12 +76,16 @@ func TestHeadState_DataRace(t *testing.T) {
    s := &Service{
        cfg: &config{BeaconDB: beaconDB, StateGen: stategen.New(beaconDB)},
    }
    b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
    require.NoError(t, err)
    wait := make(chan struct{})
    st, _ := util.DeterministicGenesisState(t, 1)
    go func() {
        defer close(wait)
        require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
        require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))

    }()
    _, err := s.HeadState(context.Background())
    _, err = s.HeadState(context.Background())
    require.NoError(t, err)
    <-wait
}
@@ -459,7 +459,7 @@ func TestService_IsOptimisticForRoot_DB_ProtoArray(t *testing.T) {
    require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: br[:], Slot: 10}))

    optimisticBlock := util.NewBeaconBlock()
    optimisticBlock.Block.Slot = 11
    optimisticBlock.Block.Slot = 97
    optimisticRoot, err := optimisticBlock.Block.HashTreeRoot()
    require.NoError(t, err)
    wsb, err = wrapper.WrappedSignedBeaconBlock(optimisticBlock)

@@ -477,18 +477,39 @@ func TestService_IsOptimisticForRoot_DB_ProtoArray(t *testing.T) {
    validatedCheckpoint := &ethpb.Checkpoint{Root: br[:]}
    require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))

    _, err = c.IsOptimisticForRoot(ctx, optimisticRoot)
    require.ErrorContains(t, "nil summary returned from the DB", err)

    require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
    optimistic, err := c.IsOptimisticForRoot(ctx, optimisticRoot)
    require.NoError(t, err)
    require.Equal(t, true, optimistic)

    require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: validatedRoot[:], Slot: 9}))
    cp := &ethpb.Checkpoint{
        Epoch: 1,
        Root:  validatedRoot[:],
    }
    require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, validatedRoot))
    require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, cp))

    validated, err := c.IsOptimisticForRoot(ctx, validatedRoot)
    require.NoError(t, err)
    require.Equal(t, false, validated)

    // Before the first finalized epoch, finalized root could be zeros.
    validatedCheckpoint = &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
    require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, br))
    require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: params.BeaconConfig().ZeroHash[:], Slot: 10}))
    require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))

    require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
    optimistic, err = c.IsOptimisticForRoot(ctx, optimisticRoot)
    require.NoError(t, err)
    require.Equal(t, true, optimistic)
}

func TestService_IsOptimisticForRoot__DB_DoublyLinkedTree(t *testing.T) {
func TestService_IsOptimisticForRoot_DB_DoublyLinkedTree(t *testing.T) {
    beaconDB := testDB.SetupDB(t)
    ctx := context.Background()
    c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}

@@ -503,7 +524,71 @@ func TestService_IsOptimisticForRoot__DB_DoublyLinkedTree(t *testing.T) {
    require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: br[:], Slot: 10}))

    optimisticBlock := util.NewBeaconBlock()
    optimisticBlock.Block.Slot = 11
    optimisticBlock.Block.Slot = 97
    optimisticRoot, err := optimisticBlock.Block.HashTreeRoot()
    require.NoError(t, err)
    wsb, err = wrapper.WrappedSignedBeaconBlock(optimisticBlock)
    require.NoError(t, err)
    require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))

    validatedBlock := util.NewBeaconBlock()
    validatedBlock.Block.Slot = 9
    validatedRoot, err := validatedBlock.Block.HashTreeRoot()
    require.NoError(t, err)
    wsb, err = wrapper.WrappedSignedBeaconBlock(validatedBlock)
    require.NoError(t, err)
    require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))

    validatedCheckpoint := &ethpb.Checkpoint{Root: br[:]}
    require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))

    _, err = c.IsOptimisticForRoot(ctx, optimisticRoot)
    require.ErrorContains(t, "nil summary returned from the DB", err)

    require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
    optimistic, err := c.IsOptimisticForRoot(ctx, optimisticRoot)
    require.NoError(t, err)
    require.Equal(t, true, optimistic)

    require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: validatedRoot[:], Slot: 9}))
    cp := &ethpb.Checkpoint{
        Epoch: 1,
        Root:  validatedRoot[:],
    }
    require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, validatedRoot))
    require.NoError(t, beaconDB.SaveFinalizedCheckpoint(ctx, cp))
    validated, err := c.IsOptimisticForRoot(ctx, validatedRoot)
    require.NoError(t, err)
    require.Equal(t, false, validated)

    // Before the first finalized epoch, finalized root could be zeros.
    validatedCheckpoint = &ethpb.Checkpoint{Root: params.BeaconConfig().ZeroHash[:]}
    require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, br))
    require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: params.BeaconConfig().ZeroHash[:], Slot: 10}))
    require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, validatedCheckpoint))

    require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: optimisticRoot[:], Slot: 11}))
    optimistic, err = c.IsOptimisticForRoot(ctx, optimisticRoot)
    require.NoError(t, err)
    require.Equal(t, true, optimistic)
}

func TestService_IsOptimisticForRoot_DB_non_canonical(t *testing.T) {
    beaconDB := testDB.SetupDB(t)
    ctx := context.Background()
    c := &Service{cfg: &config{BeaconDB: beaconDB, ForkChoiceStore: doublylinkedtree.New(0, 0)}, head: &head{slot: 101, root: [32]byte{'b'}}}
    c.head = &head{root: params.BeaconConfig().ZeroHash}
    b := util.NewBeaconBlock()
    b.Block.Slot = 10
    br, err := b.Block.HashTreeRoot()
    require.NoError(t, err)
    wsb, err := wrapper.WrappedSignedBeaconBlock(b)
    require.NoError(t, err)
    require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
    require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: br[:], Slot: 10}))

    optimisticBlock := util.NewBeaconBlock()
    optimisticBlock.Block.Slot = 97
    optimisticRoot, err := optimisticBlock.Block.HashTreeRoot()
    require.NoError(t, err)
    wsb, err = wrapper.WrappedSignedBeaconBlock(optimisticBlock)

@@ -529,5 +614,6 @@ func TestService_IsOptimisticForRoot__DB_DoublyLinkedTree(t *testing.T) {
    require.NoError(t, beaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Root: validatedRoot[:], Slot: 9}))
    validated, err := c.IsOptimisticForRoot(ctx, validatedRoot)
    require.NoError(t, err)
    require.Equal(t, false, validated)
    require.Equal(t, true, validated)

}
@@ -13,7 +13,9 @@ var (
    errInvalidNilSummary = errors.New("nil summary returned from the DB")
    // errNilParentInDB is returned when a nil parent block is returned from the DB.
    errNilParentInDB = errors.New("nil parent block in DB")
    // errWrongBlockCound is returned when the wrong number of blocks or
    // errWrongBlockCount is returned when the wrong number of blocks or
    // block roots is used
    errWrongBlockCount = errors.New("wrong number of blocks or block roots")
    // block is not a valid optimistic candidate block
    errNotOptimisticCandidate = errors.New("block is not suitable for optimistic sync")
)
@@ -24,8 +24,9 @@ import (
    "go.opencensus.io/trace"
)

// UpdateHeadWithBalances updates the beacon state head after getting justified balanced from cache.
func (s *Service) UpdateHeadWithBalances(ctx context.Context) error {
// UpdateAndSaveHeadWithBalances updates the beacon state head after getting justified balanced from cache.
// This function is only used in spec-tests, it does save the head after updating it.
func (s *Service) UpdateAndSaveHeadWithBalances(ctx context.Context) error {
    cp := s.store.JustifiedCheckpt()
    if cp == nil {
        return errors.New("no justified checkpoint")

@@ -35,8 +36,19 @@ func (s *Service) UpdateHeadWithBalances(ctx context.Context) error {
        msg := fmt.Sprintf("could not read balances for state w/ justified checkpoint %#x", cp.Root)
        return errors.Wrap(err, msg)
    }

    return s.updateHead(ctx, balances)
    headRoot, err := s.updateHead(ctx, balances)
    if err != nil {
        return errors.Wrap(err, "could not update head")
    }
    headBlock, err := s.cfg.BeaconDB.Block(ctx, headRoot)
    if err != nil {
        return err
    }
    headState, err := s.cfg.StateGen.StateByRoot(ctx, headRoot)
    if err != nil {
        return errors.Wrap(err, "could not retrieve head state in DB")
    }
    return s.saveHead(ctx, headRoot, headBlock, headState)
}

// This defines the current chain service's view of head.

@@ -49,18 +61,18 @@ type head struct {

// Determined the head from the fork choice service and saves its new data
// (head root, head block, and head state) to the local service cache.
func (s *Service) updateHead(ctx context.Context, balances []uint64) error {
func (s *Service) updateHead(ctx context.Context, balances []uint64) ([32]byte, error) {
    ctx, span := trace.StartSpan(ctx, "blockChain.updateHead")
    defer span.End()

    // Get head from the fork choice service.
    f := s.store.FinalizedCheckpt()
    if f == nil {
        return errNilFinalizedInStore
        return [32]byte{}, errNilFinalizedInStore
    }
    j := s.store.JustifiedCheckpt()
    if j == nil {
        return errNilJustifiedInStore
        return [32]byte{}, errNilJustifiedInStore
    }
    // To get head before the first justified epoch, the fork choice will start with origin root
    // instead of zero hashes.

@@ -76,7 +88,7 @@ func (s *Service) updateHead(ctx context.Context, balances []uint64) error {
    if !s.cfg.ForkChoiceStore.HasNode(headStartRoot) {
        jb, err := s.cfg.BeaconDB.Block(ctx, headStartRoot)
        if err != nil {
            return err
            return [32]byte{}, err
        }
        if features.Get().EnableForkChoiceDoublyLinkedTree {
            s.cfg.ForkChoiceStore = doublylinkedtree.New(j.Epoch, f.Epoch)

@@ -84,22 +96,16 @@ func (s *Service) updateHead(ctx context.Context, balances []uint64) error {
            s.cfg.ForkChoiceStore = protoarray.New(j.Epoch, f.Epoch, bytesutil.ToBytes32(f.Root))
        }
        if err := s.insertBlockToForkChoiceStore(ctx, jb.Block(), headStartRoot, f, j); err != nil {
            return err
            return [32]byte{}, err
        }
    }

    headRoot, err := s.cfg.ForkChoiceStore.Head(ctx, j.Epoch, headStartRoot, balances, f.Epoch)
    if err != nil {
        return err
    }

    // Save head to the local service cache.
    return s.saveHead(ctx, headRoot)
    return s.cfg.ForkChoiceStore.Head(ctx, j.Epoch, headStartRoot, balances, f.Epoch)
}

// This saves head info to the local service cache, it also saves the
// new head root to the DB.
func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
func (s *Service) saveHead(ctx context.Context, headRoot [32]byte, headBlock block.SignedBeaconBlock, headState state.BeaconState) error {
    ctx, span := trace.StartSpan(ctx, "blockChain.saveHead")
    defer span.End()

@@ -111,6 +117,12 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
    if headRoot == bytesutil.ToBytes32(r) {
        return nil
    }
    if err := helpers.BeaconBlockIsNil(headBlock); err != nil {
        return err
    }
    if headState == nil || headState.IsNil() {
        return errors.New("cannot save nil head state")
    }

    // If the head state is not available, just return nil.
    // There's nothing to cache

@@ -118,31 +130,13 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
        return nil
    }

    // Get the new head block from DB.
    newHeadBlock, err := s.cfg.BeaconDB.Block(ctx, headRoot)
    if err != nil {
        return err
    }
    if err := helpers.BeaconBlockIsNil(newHeadBlock); err != nil {
        return err
    }

    // Get the new head state from cached state or DB.
    newHeadState, err := s.cfg.StateGen.StateByRoot(ctx, headRoot)
    if err != nil {
        return errors.Wrap(err, "could not retrieve head state in DB")
    }
    if newHeadState == nil || newHeadState.IsNil() {
        return errors.New("cannot save nil head state")
    }

    // A chain re-org occurred, so we fire an event notifying the rest of the services.
    headSlot := s.HeadSlot()
    newHeadSlot := newHeadBlock.Block().Slot()
    newHeadSlot := headBlock.Block().Slot()
    oldHeadRoot := s.headRoot()
    oldStateRoot := s.headBlock().Block().StateRoot()
    newStateRoot := newHeadBlock.Block().StateRoot()
    if bytesutil.ToBytes32(newHeadBlock.Block().ParentRoot()) != bytesutil.ToBytes32(r) {
    newStateRoot := headBlock.Block().StateRoot()
    if bytesutil.ToBytes32(headBlock.Block().ParentRoot()) != bytesutil.ToBytes32(r) {
        log.WithFields(logrus.Fields{
            "newSlot": fmt.Sprintf("%d", newHeadSlot),
            "oldSlot": fmt.Sprintf("%d", headSlot),

@@ -174,7 +168,7 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
    }

    // Cache the new head info.
    s.setHead(headRoot, newHeadBlock, newHeadState)
    s.setHead(headRoot, headBlock, headState)

    // Save the new head root to DB.
    if err := s.cfg.BeaconDB.SaveHeadBlockRoot(ctx, headRoot); err != nil {

@@ -184,7 +178,7 @@ func (s *Service) saveHead(ctx context.Context, headRoot [32]byte) error {
    // Forward an event capturing a new chain head over a common event feed
    // done in a goroutine to avoid blocking the critical runtime main routine.
    go func() {
        if err := s.notifyNewHeadEvent(ctx, newHeadSlot, newHeadState, newStateRoot, headRoot[:]); err != nil {
        if err := s.notifyNewHeadEvent(ctx, newHeadSlot, headState, newStateRoot, headRoot[:]); err != nil {
            log.WithError(err).Error("Could not notify event feed of new chain head")
        }
    }()
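With this refactor, updateHead only computes and returns the head root; the caller loads the matching block and state and hands all three to saveHead, exactly as UpdateAndSaveHeadWithBalances now does. A self-contained sketch of that call pattern, using placeholder types and loaders rather than Prysm's real ones:

```go
package main

import (
	"context"
	"errors"
	"fmt"
)

// Placeholder types standing in for the signed block and beacon state.
type (
	blockStub struct{ slot uint64 }
	stateStub struct{ slot uint64 }
)

// updateHead returns only the chosen head root, mirroring the new signature.
func updateHead(_ context.Context) ([32]byte, error) { return [32]byte{'h'}, nil }

func loadBlock(_ context.Context, _ [32]byte) (*blockStub, error) { return &blockStub{slot: 1}, nil }
func loadState(_ context.Context, _ [32]byte) (*stateStub, error) { return &stateStub{slot: 1}, nil }

// saveHead now receives the block and state it should cache, instead of
// re-fetching them itself.
func saveHead(_ context.Context, root [32]byte, b *blockStub, st *stateStub) error {
	if b == nil || st == nil {
		return errors.New("cannot save nil head block or state")
	}
	fmt.Printf("saved head %x at slot %d\n", root[:1], b.slot)
	return nil
}

func main() {
	ctx := context.Background()
	root, err := updateHead(ctx)
	if err != nil {
		panic(err)
	}
	b, _ := loadBlock(ctx, root)
	st, _ := loadState(ctx, root)
	if err := saveHead(ctx, root, b, st); err != nil {
		panic(err)
	}
}
```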
@@ -9,6 +9,8 @@ import (
    types "github.com/prysmaticlabs/eth2-types"
    mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
    testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
    doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
    "github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
    "github.com/prysmaticlabs/prysm/config/features"
    "github.com/prysmaticlabs/prysm/config/params"
    ethpbv1 "github.com/prysmaticlabs/prysm/proto/eth/v1"

@@ -27,8 +29,10 @@ func TestSaveHead_Same(t *testing.T) {

    r := [32]byte{'A'}
    service.head = &head{slot: 0, root: r}

    require.NoError(t, service.saveHead(context.Background(), r))
    b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
    require.NoError(t, err)
    st, _ := util.DeterministicGenesisState(t, 1)
    require.NoError(t, service.saveHead(context.Background(), r, b, st))
    assert.Equal(t, types.Slot(0), service.headSlot(), "Head did not stay the same")
    assert.Equal(t, r, service.headRoot(), "Head did not stay the same")
}

@@ -66,7 +70,7 @@ func TestSaveHead_Different(t *testing.T) {
    require.NoError(t, headState.SetSlot(1))
    require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Slot: 1, Root: newRoot[:]}))
    require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), headState, newRoot))
    require.NoError(t, service.saveHead(context.Background(), newRoot))
    require.NoError(t, service.saveHead(context.Background(), newRoot, wsb, headState))

    assert.Equal(t, types.Slot(1), service.HeadSlot(), "Head did not change")

@@ -112,7 +116,7 @@ func TestSaveHead_Different_Reorg(t *testing.T) {
    require.NoError(t, headState.SetSlot(1))
    require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(context.Background(), &ethpb.StateSummary{Slot: 1, Root: newRoot[:]}))
    require.NoError(t, service.cfg.BeaconDB.SaveState(context.Background(), headState, newRoot))
    require.NoError(t, service.saveHead(context.Background(), newRoot))
    require.NoError(t, service.saveHead(context.Background(), newRoot, wsb, headState))

    assert.Equal(t, types.Slot(1), service.HeadSlot(), "Head did not change")

@@ -154,8 +158,10 @@ func TestUpdateHead_MissingJustifiedRoot(t *testing.T) {
    service.store.SetJustifiedCheckpt(&ethpb.Checkpoint{Root: r[:]})
    service.store.SetFinalizedCheckpt(&ethpb.Checkpoint{})
    service.store.SetBestJustifiedCheckpt(&ethpb.Checkpoint{})

    require.NoError(t, service.updateHead(context.Background(), []uint64{}))
    headRoot, err := service.updateHead(context.Background(), []uint64{})
    require.NoError(t, err)
    st, _ := util.DeterministicGenesisState(t, 1)
    require.NoError(t, service.saveHead(context.Background(), headRoot, wsb, st))
}

func Test_notifyNewHeadEvent(t *testing.T) {

@@ -279,3 +285,43 @@ func TestSaveOrphanedAtts_CanFilter(t *testing.T) {
    atts := b.Block.Body.Attestations
    require.DeepNotSSZEqual(t, atts, savedAtts)
}

func TestUpdateHead_noSavedChanges(t *testing.T) {
    ctx := context.Background()

    beaconDB := testDB.SetupDB(t)
    fcs := doublylinkedtree.New(0, 0)
    opts := []Option{
        WithDatabase(beaconDB),
        WithStateGen(stategen.New(beaconDB)),
        WithForkChoiceStore(fcs),
    }

    service, err := NewService(ctx, opts...)
    require.NoError(t, err)

    bellatrixBlk, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlockBellatrix())
    require.NoError(t, err)
    bellatrixBlkRoot, err := bellatrixBlk.Block().HashTreeRoot()
    require.NoError(t, err)
    require.NoError(t, beaconDB.SaveBlock(ctx, bellatrixBlk))
    fcp := &ethpb.Checkpoint{
        Root:  bellatrixBlkRoot[:],
        Epoch: 1,
    }
    service.store.SetFinalizedCheckpt(fcp)
    service.store.SetJustifiedCheckpt(fcp)
    require.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, bellatrixBlkRoot))

    bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)
    require.NoError(t, beaconDB.SaveState(ctx, bellatrixState, bellatrixBlkRoot))
    service.cfg.StateGen.SaveFinalizedState(0, bellatrixBlkRoot, bellatrixState)

    headRoot := service.headRoot()
    require.Equal(t, [32]byte{}, headRoot)

    newRoot, err := service.updateHead(ctx, []uint64{1, 2})
    require.NoError(t, err)
    require.NotEqual(t, headRoot, newRoot)
    require.Equal(t, headRoot, service.headRoot())
}
@@ -1,49 +0,0 @@
package blockchain

import (
    "context"

    "github.com/ethereum/go-ethereum/common"
    "github.com/pkg/errors"
    enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
)

type mockEngineService struct {
    newPayloadError error
    forkchoiceError error
    blks            map[[32]byte]*enginev1.ExecutionBlock
}

func (m *mockEngineService) NewPayload(context.Context, *enginev1.ExecutionPayload) ([]byte, error) {
    return nil, m.newPayloadError
}

func (m *mockEngineService) ForkchoiceUpdated(context.Context, *enginev1.ForkchoiceState, *enginev1.PayloadAttributes) (*enginev1.PayloadIDBytes, []byte, error) {
    return nil, nil, m.forkchoiceError
}

func (*mockEngineService) GetPayloadV1(
    _ context.Context, _ enginev1.PayloadIDBytes,
) *enginev1.ExecutionPayload {
    return nil
}

func (*mockEngineService) GetPayload(context.Context, [8]byte) (*enginev1.ExecutionPayload, error) {
    return nil, nil
}

func (*mockEngineService) ExchangeTransitionConfiguration(context.Context, *enginev1.TransitionConfiguration) error {
    return nil
}

func (*mockEngineService) LatestExecutionBlock(context.Context) (*enginev1.ExecutionBlock, error) {
    return nil, nil
}

func (m *mockEngineService) ExecutionBlockByHash(_ context.Context, hash common.Hash) (*enginev1.ExecutionBlock, error) {
    blk, ok := m.blks[common.BytesToHash(hash.Bytes())]
    if !ok {
        return nil, errors.New("block not found")
    }
    return blk, nil
}
@@ -86,9 +86,9 @@ func TestService_newSlot(t *testing.T) {
    for _, test := range tests {
        service, err := NewService(ctx, opts...)
        require.NoError(t, err)
        store := store.New(test.args.justified, test.args.finalized)
        store.SetBestJustifiedCheckpt(test.args.bestJustified)
        service.store = store
        s := store.New(test.args.justified, test.args.finalized)
        s.SetBestJustifiedCheckpt(test.args.bestJustified)
        service.store = s

        require.NoError(t, service.NewSlot(ctx, test.args.slot))
        if test.args.shouldEqual {
@@ -5,15 +5,21 @@ import (
    "fmt"

    "github.com/pkg/errors"
    types "github.com/prysmaticlabs/eth2-types"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
    v1 "github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/time"
    "github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
    "github.com/prysmaticlabs/prysm/beacon-chain/db/kv"
    "github.com/prysmaticlabs/prysm/beacon-chain/powchain"
    "github.com/prysmaticlabs/prysm/beacon-chain/state"
    fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
    "github.com/prysmaticlabs/prysm/config/params"
    "github.com/prysmaticlabs/prysm/encoding/bytesutil"
    enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
    ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
    "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
    "github.com/prysmaticlabs/prysm/runtime/version"
    "github.com/prysmaticlabs/prysm/time/slots"
    "github.com/sirupsen/logrus"
    "go.opencensus.io/trace"
)

@@ -21,7 +27,7 @@ import (
// notifyForkchoiceUpdate signals execution engine the fork choice updates. Execution engine should:
// 1. Re-organizes the execution payload chain and corresponding state to make head_block_hash the head.
// 2. Applies finality to the execution state: it irreversibly persists the chain of all execution payloads and corresponding state, up to and including finalized_block_hash.
func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headBlk block.BeaconBlock, headRoot [32]byte, finalizedRoot [32]byte) (*enginev1.PayloadIDBytes, error) {
func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headState state.BeaconState, headBlk block.BeaconBlock, headRoot [32]byte, finalizedRoot [32]byte) (*enginev1.PayloadIDBytes, error) {
    ctx, span := trace.StartSpan(ctx, "blockChain.notifyForkchoiceUpdate")
    defer span.End()

@@ -29,10 +35,7 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headBlk block.Beac
        return nil, errors.New("nil head block")
    }
    // Must not call fork choice updated until the transition conditions are met on the Pow network.
    if isPreBellatrix(headBlk.Version()) {
        return nil, nil
    }
    isExecutionBlk, err := blocks.ExecutionBlock(headBlk.Body())
    isExecutionBlk, err := blocks.IsExecutionBlock(headBlk.Body())
    if err != nil {
        return nil, errors.Wrap(err, "could not determine if block is execution block")
    }

@@ -43,36 +46,31 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headBlk block.Beac
    if err != nil {
        return nil, errors.Wrap(err, "could not get execution payload")
    }
    finalizedBlock, err := s.cfg.BeaconDB.Block(ctx, s.ensureRootNotZeros(finalizedRoot))
    finalizedHash, err := s.getFinalizedPayloadHash(ctx, finalizedRoot)
    if err != nil {
        return nil, errors.Wrap(err, "could not get finalized block")
    }
    var finalizedHash []byte
    if isPreBellatrix(finalizedBlock.Block().Version()) {
        finalizedHash = params.BeaconConfig().ZeroHash[:]
    } else {
        payload, err := finalizedBlock.Block().Body().ExecutionPayload()
        if err != nil {
            return nil, errors.Wrap(err, "could not get finalized block execution payload")
        }
        finalizedHash = payload.BlockHash
        return nil, errors.Wrap(err, "could not get finalized payload hash")
    }

    fcs := &enginev1.ForkchoiceState{
        HeadBlockHash:      headPayload.BlockHash,
        SafeBlockHash:      headPayload.BlockHash,
        FinalizedBlockHash: finalizedHash,
        FinalizedBlockHash: finalizedHash[:],
    }

    // payload attribute is only required when requesting payload, here we are just updating fork choice, so it is nil.
    payloadID, _, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, nil /*payload attribute*/)
    nextSlot := s.CurrentSlot() + 1 // Cache payload ID for next slot proposer.
    hasAttr, attr, proposerId, err := s.getPayloadAttribute(ctx, headState, nextSlot)
    if err != nil {
        return nil, errors.Wrap(err, "could not get payload attribute")
    }

    payloadID, _, err := s.cfg.ExecutionEngineCaller.ForkchoiceUpdated(ctx, fcs, attr)
    if err != nil {
        switch err {
        case v1.ErrAcceptedSyncingPayloadStatus:
        case powchain.ErrAcceptedSyncingPayloadStatus:
            log.WithFields(logrus.Fields{
                "headSlot":      headBlk.Slot(),
                "headHash":      fmt.Sprintf("%#x", bytesutil.Trunc(headPayload.BlockHash)),
                "finalizedHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash)),
                "headSlot":                  headBlk.Slot(),
                "headPayloadBlockHash":      fmt.Sprintf("%#x", bytesutil.Trunc(headPayload.BlockHash)),
                "finalizedPayloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(finalizedHash[:])),
            }).Info("Called fork choice updated with optimistic block")
            return payloadID, nil
        default:

@@ -82,72 +80,131 @@ func (s *Service) notifyForkchoiceUpdate(ctx context.Context, headBlk block.Beac
    if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, headRoot); err != nil {
        return nil, errors.Wrap(err, "could not set block to valid")
    }
    if hasAttr { // If the forkchoice update call has an attribute, update the proposer payload ID cache.
        var pId [8]byte
        copy(pId[:], payloadID[:])
        s.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(nextSlot, proposerId, pId)
    }
    return payloadID, nil
}

// notifyForkchoiceUpdate signals execution engine on a new payload
// getFinalizedPayloadHash returns the finalized payload hash for the given finalized block root.
// It checks the following in order:
// 1. The finalized block exists in db
// 2. The finalized block exists in initial sync block cache
// 3. The finalized block is the weak subjectivity block and exists in db
// Error is returned if the finalized block is not found from above.
func (s *Service) getFinalizedPayloadHash(ctx context.Context, finalizedRoot [32]byte) ([32]byte, error) {
    b, err := s.cfg.BeaconDB.Block(ctx, s.ensureRootNotZeros(finalizedRoot))
    if err != nil {
        return [32]byte{}, errors.Wrap(err, "could not get finalized block")
    }
    if b != nil {
        return getPayloadHash(b.Block())
    }

    b = s.getInitSyncBlock(finalizedRoot)
    if b != nil {
        return getPayloadHash(b.Block())
    }

    r, err := s.cfg.BeaconDB.OriginCheckpointBlockRoot(ctx)
    if err != nil {
        return [32]byte{}, errors.Wrap(err, "could not get finalized block")
    }
    b, err = s.cfg.BeaconDB.Block(ctx, r)
    if err != nil {
        return [32]byte{}, errors.Wrap(err, "could not get finalized block")
    }
    if b != nil {
        return getPayloadHash(b.Block())
    }

    return [32]byte{}, errors.Errorf("finalized block with root %#x does not exist in the db or our cache", s.ensureRootNotZeros(finalizedRoot))
}

// getPayloadHash returns the payload hash for the input given block.
// zeros are returned if the block is older than bellatrix.
func getPayloadHash(b block.BeaconBlock) ([32]byte, error) {
    if blocks.IsPreBellatrixVersion(b.Version()) {
        return params.BeaconConfig().ZeroHash, nil
    }

    payload, err := b.Body().ExecutionPayload()
    if err != nil {
        return [32]byte{}, errors.Wrap(err, "could not get finalized block execution payload")
    }
    return bytesutil.ToBytes32(payload.BlockHash), nil
}

// notifyForkchoiceUpdate signals execution engine on a new payload.
// It returns true if the EL has returned VALID for the block
func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion, postStateVersion int,
    preStateHeader, postStateHeader *ethpb.ExecutionPayloadHeader, blk block.SignedBeaconBlock, root [32]byte) error {
    preStateHeader, postStateHeader *ethpb.ExecutionPayloadHeader, blk block.SignedBeaconBlock) (bool, error) {
    ctx, span := trace.StartSpan(ctx, "blockChain.notifyNewPayload")
    defer span.End()

    // Execution payload is only supported in Bellatrix and beyond. Pre
    // merge blocks are never optimistic
    if isPreBellatrix(postStateVersion) {
        return s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, root)
    if blocks.IsPreBellatrixVersion(postStateVersion) {
        return true, nil
    }
    if err := helpers.BeaconBlockIsNil(blk); err != nil {
        return err
        return false, err
    }
    body := blk.Block().Body()
    enabled, err := blocks.IsExecutionEnabledUsingHeader(postStateHeader, body)
    if err != nil {
        return errors.Wrap(err, "could not determine if execution is enabled")
        return false, errors.Wrap(err, "could not determine if execution is enabled")
    }
    if !enabled {
        return s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, root)
        return true, nil
    }
    payload, err := body.ExecutionPayload()
    if err != nil {
        return errors.Wrap(err, "could not get execution payload")
        return false, errors.Wrap(err, "could not get execution payload")
    }
    _, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload)
    lastValidHash, err := s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload)
    if err != nil {
        switch err {
        case v1.ErrAcceptedSyncingPayloadStatus:
        case powchain.ErrAcceptedSyncingPayloadStatus:
            log.WithFields(logrus.Fields{
                "slot":      blk.Block().Slot(),
                "blockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash)),
            }).Info("Called new payload with optimistic block")
            return nil
            return false, nil
        case powchain.ErrInvalidPayloadStatus:
            root, err := blk.Block().HashTreeRoot()
            if err != nil {
                return false, err
            }
            invalidRoots, err := s.ForkChoicer().SetOptimisticToInvalid(ctx, root, bytesutil.ToBytes32(lastValidHash))
            if err != nil {
                return false, err
            }
            if err := s.removeInvalidBlockAndState(ctx, invalidRoots); err != nil {
                return false, err
            }
            return false, errors.New("could not validate an INVALID payload from execution engine")
        default:
            return errors.Wrap(err, "could not validate execution payload from execution engine")
            return false, errors.Wrap(err, "could not validate execution payload from execution engine")
        }
    }

    if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, root); err != nil {
        return errors.Wrap(err, "could not set optimistic status")
    }

    // During the transition event, the transition block should be verified for sanity.
    if isPreBellatrix(preStateVersion) {
    if blocks.IsPreBellatrixVersion(preStateVersion) {
        // Handle case where pre-state is Altair but block contains payload.
        // To reach here, the block must have contained a valid payload.
        return s.validateMergeBlock(ctx, blk)
        return true, s.validateMergeBlock(ctx, blk)
    }
    atTransition, err := blocks.IsMergeTransitionBlockUsingPayloadHeader(preStateHeader, body)
    atTransition, err := blocks.IsMergeTransitionBlockUsingPreStatePayloadHeader(preStateHeader, body)
    if err != nil {
        return errors.Wrap(err, "could not check if merge block is terminal")
        return true, errors.Wrap(err, "could not check if merge block is terminal")
    }
    if !atTransition {
        return nil
        return true, nil
    }
    return s.validateMergeBlock(ctx, blk)
}

// isPreBellatrix returns true if input version is before bellatrix fork.
func isPreBellatrix(v int) bool {
    return v == version.Phase0 || v == version.Altair
    return true, s.validateMergeBlock(ctx, blk)
}

// optimisticCandidateBlock returns true if this block can be optimistically synced.

@@ -157,10 +214,6 @@ func isPreBellatrix(v int) bool {
//  if is_execution_block(opt_store.blocks[block.parent_root]):
//      return True
//
//  justified_root = opt_store.block_states[opt_store.head_block_root].current_justified_checkpoint.root
//  if is_execution_block(opt_store.blocks[justified_root]):
//      return True
//
//  if block.slot + SAFE_SLOTS_TO_IMPORT_OPTIMISTICALLY <= current_slot:
//      return True
//

@@ -178,21 +231,75 @@ func (s *Service) optimisticCandidateBlock(ctx context.Context, blk block.Beacon
        return false, errNilParentInDB
    }

    parentIsExecutionBlock, err := blocks.ExecutionBlock(parent.Block().Body())
    parentIsExecutionBlock, err := blocks.IsExecutionBlock(parent.Block().Body())
    if err != nil {
        return false, err
    }
    if parentIsExecutionBlock {
        return true, nil
    return parentIsExecutionBlock, nil
}

// getPayloadAttributes returns the payload attributes for the given state and slot.
// The attribute is required to initiate a payload build process in the context of an `engine_forkchoiceUpdated` call.
func (s *Service) getPayloadAttribute(ctx context.Context, st state.BeaconState, slot types.Slot) (bool, *enginev1.PayloadAttributes, types.ValidatorIndex, error) {
    proposerID, _, ok := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(slot)
    if !ok { // There's no need to build attribute if there is no proposer for slot.
        return false, nil, 0, nil
    }

    j := s.store.JustifiedCheckpt()
    if j == nil {
        return false, errNilJustifiedInStore
    }
    jBlock, err := s.cfg.BeaconDB.Block(ctx, bytesutil.ToBytes32(j.Root))
    // Get previous randao.
    st = st.Copy()
    st, err := transition.ProcessSlotsIfPossible(ctx, st, slot)
    if err != nil {
        return false, err
        return false, nil, 0, err
    }
    return blocks.ExecutionBlock(jBlock.Block().Body())
    prevRando, err := helpers.RandaoMix(st, time.CurrentEpoch(st))
    if err != nil {
        return false, nil, 0, nil
    }

    // Get fee recipient.
    feeRecipient := params.BeaconConfig().DefaultFeeRecipient
    recipient, err := s.cfg.BeaconDB.FeeRecipientByValidatorID(ctx, proposerID)
    switch {
    case errors.Is(err, kv.ErrNotFoundFeeRecipient):
        if feeRecipient.String() == fieldparams.EthBurnAddressHex {
            logrus.WithFields(logrus.Fields{
                "validatorIndex": proposerID,
                "burnAddress":    fieldparams.EthBurnAddressHex,
            }).Error("Fee recipient not set. Using burn address")
        }
    case err != nil:
        return false, nil, 0, errors.Wrap(err, "could not get fee recipient in db")
    default:
        feeRecipient = recipient
    }

    // Get timestamp.
    t, err := slots.ToTime(uint64(s.genesisTime.Unix()), slot)
    if err != nil {
        return false, nil, 0, err
    }
    attr := &enginev1.PayloadAttributes{
        Timestamp:             uint64(t.Unix()),
        PrevRandao:            prevRando,
        SuggestedFeeRecipient: feeRecipient.Bytes(),
    }
    return true, attr, proposerID, nil
}

// removeInvalidBlockAndState removes the invalid block and its corresponding state from the cache and DB.
func (s *Service) removeInvalidBlockAndState(ctx context.Context, blkRoots [][32]byte) error {
    for _, root := range blkRoots {
        if err := s.cfg.StateGen.DeleteStateFromCaches(ctx, root); err != nil {
            return err
        }

        // Delete block also deletes the state as well.
        if err := s.cfg.BeaconDB.DeleteBlock(ctx, root); err != nil {
            // TODO(10487): If a caller requests to delete a root that's justified and finalized. We should gracefully shutdown.
            // This is an irreparable condition, it would me a justified or finalized block has become invalid.
            return err
        }
    }
    return nil
}
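Under the reworked contract, notifyNewPayload returns a boolean telling the caller whether the execution engine judged the payload VALID: SYNCING/ACCEPTED surfaces as (false, nil) so the block is imported optimistically, while INVALID prunes the bad branch and returns an error. A simplified caller-side sketch of how that result might be interpreted; the function here is a stand-in, not Prysm's real implementation:

```go
package main

import (
	"context"
	"fmt"
)

// notifyNewPayload is a stand-in that mimics the new (bool, error) contract:
// a syncing engine yields (false, nil); a valid payload yields (true, nil).
func notifyNewPayload(_ context.Context, engineSyncing bool) (bool, error) {
	if engineSyncing {
		return false, nil // engine not ready to judge: keep the block optimistic
	}
	return true, nil
}

func main() {
	ctx := context.Background()
	isValid, err := notifyNewPayload(ctx, true)
	switch {
	case err != nil:
		fmt.Println("reject block:", err)
	case !isValid:
		fmt.Println("import block optimistically; validate it later")
	default:
		fmt.Println("mark block as fully valid in fork choice")
	}
}
```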
@@ -5,9 +5,14 @@ import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
engine "github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
|
||||
mockPOW "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
|
||||
@@ -18,9 +23,11 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/testing/util"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
func Test_NotifyForkchoiceUpdate(t *testing.T) {
|
||||
@@ -41,8 +48,13 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(fcs),
|
||||
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
st, _ := util.DeterministicGenesisState(t, 1)
|
||||
service.head = &head{
|
||||
state: st,
|
||||
}
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
|
||||
@@ -131,7 +143,7 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
return b
|
||||
}(),
|
||||
newForkchoiceErr: engine.ErrAcceptedSyncingPayloadStatus,
|
||||
newForkchoiceErr: powchain.ErrAcceptedSyncingPayloadStatus,
|
||||
finalizedRoot: bellatrixBlkRoot,
|
||||
},
|
||||
{
|
||||
@@ -145,7 +157,7 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
return b
|
||||
}(),
|
||||
newForkchoiceErr: engine.ErrInvalidPayloadStatus,
|
||||
newForkchoiceErr: powchain.ErrInvalidPayloadStatus,
|
||||
finalizedRoot: bellatrixBlkRoot,
|
||||
errString: "could not notify forkchoice update from execution engine: payload status is INVALID",
|
||||
},
|
||||
@@ -153,9 +165,9 @@ func Test_NotifyForkchoiceUpdate(t *testing.T) {

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			engine := &mockEngineService{forkchoiceError: tt.newForkchoiceErr}
			service.cfg.ExecutionEngineCaller = engine
			_, err := service.notifyForkchoiceUpdate(ctx, tt.blk, service.headRoot(), tt.finalizedRoot)
			service.cfg.ExecutionEngineCaller = &mockPOW.EngineClient{ErrForkchoiceUpdated: tt.newForkchoiceErr}
			st, _ := util.DeterministicGenesisState(t, 1)
			_, err := service.notifyForkchoiceUpdate(ctx, st, tt.blk, service.headRoot(), tt.finalizedRoot)
			if tt.errString != "" {
				require.ErrorContains(t, tt.errString, err)
			} else {
@@ -181,13 +193,6 @@ func Test_NotifyNewPayload(t *testing.T) {
	phase0State, _ := util.DeterministicGenesisState(t, 1)
	altairState, _ := util.DeterministicGenesisStateAltair(t, 1)
	bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)
	blk := &ethpb.SignedBeaconBlockBellatrix{
		Block: &ethpb.BeaconBlockBellatrix{
			Body: &ethpb.BeaconBlockBodyBellatrix{
				ExecutionPayload: &v1.ExecutionPayload{},
			},
		},
	}
	a := &ethpb.SignedBeaconBlockAltair{
		Block: &ethpb.BeaconBlockAltair{
			Body: &ethpb.BeaconBlockBodyAltair{},
@@ -195,55 +200,84 @@ func Test_NotifyNewPayload(t *testing.T) {
	}
	altairBlk, err := wrapper.WrappedSignedBeaconBlock(a)
	require.NoError(t, err)
	bellatrixBlk, err := wrapper.WrappedSignedBeaconBlock(blk)
	blk := &ethpb.SignedBeaconBlockBellatrix{
		Block: &ethpb.BeaconBlockBellatrix{
			Slot: 1,
			Body: &ethpb.BeaconBlockBodyBellatrix{
				ExecutionPayload: &v1.ExecutionPayload{
					BlockNumber: 1,
					ParentHash: make([]byte, fieldparams.RootLength),
					FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
					StateRoot: make([]byte, fieldparams.RootLength),
					ReceiptsRoot: make([]byte, fieldparams.RootLength),
					LogsBloom: make([]byte, fieldparams.LogsBloomLength),
					PrevRandao: make([]byte, fieldparams.RootLength),
					BaseFeePerGas: make([]byte, fieldparams.RootLength),
					BlockHash: make([]byte, fieldparams.RootLength),
				},
			},
		},
	}
	bellatrixBlk, err := wrapper.WrappedSignedBeaconBlock(util.HydrateSignedBeaconBlockBellatrix(blk))
require.NoError(t, err)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
r, err := bellatrixBlk.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 0, [32]byte{}, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 1, r, [32]byte{}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
preState state.BeaconState
|
||||
postState state.BeaconState
|
||||
blk block.SignedBeaconBlock
|
||||
newPayloadErr error
|
||||
errString string
|
||||
name string
|
||||
preState state.BeaconState
|
||||
postState state.BeaconState
|
||||
isValidPayload bool
|
||||
blk block.SignedBeaconBlock
|
||||
newPayloadErr error
|
||||
errString string
|
||||
}{
|
||||
{
|
||||
name: "phase 0 post state",
|
||||
postState: phase0State,
|
||||
preState: phase0State,
|
||||
name: "phase 0 post state",
|
||||
postState: phase0State,
|
||||
preState: phase0State,
|
||||
isValidPayload: true,
|
||||
},
|
||||
{
|
||||
name: "altair post state",
|
||||
postState: altairState,
|
||||
preState: altairState,
|
||||
name: "altair post state",
|
||||
postState: altairState,
|
||||
preState: altairState,
|
||||
isValidPayload: true,
|
||||
},
|
||||
{
|
||||
name: "nil beacon block",
|
||||
postState: bellatrixState,
|
||||
preState: bellatrixState,
|
||||
errString: "signed beacon block can't be nil",
|
||||
name: "nil beacon block",
|
||||
postState: bellatrixState,
|
||||
preState: bellatrixState,
|
||||
errString: "signed beacon block can't be nil",
|
||||
isValidPayload: false,
|
||||
},
|
||||
{
|
||||
name: "new payload with optimistic block",
|
||||
postState: bellatrixState,
|
||||
preState: bellatrixState,
|
||||
blk: bellatrixBlk,
|
||||
newPayloadErr: engine.ErrAcceptedSyncingPayloadStatus,
|
||||
name: "new payload with optimistic block",
|
||||
postState: bellatrixState,
|
||||
preState: bellatrixState,
|
||||
blk: bellatrixBlk,
|
||||
newPayloadErr: powchain.ErrAcceptedSyncingPayloadStatus,
|
||||
isValidPayload: false,
|
||||
},
|
||||
{
|
||||
name: "new payload with invalid block",
|
||||
postState: bellatrixState,
|
||||
preState: bellatrixState,
|
||||
blk: bellatrixBlk,
|
||||
newPayloadErr: engine.ErrInvalidPayloadStatus,
|
||||
errString: "could not validate execution payload from execution engine: payload status is INVALID",
|
||||
name: "new payload with invalid block",
|
||||
postState: bellatrixState,
|
||||
preState: bellatrixState,
|
||||
blk: bellatrixBlk,
|
||||
newPayloadErr: powchain.ErrInvalidPayloadStatus,
|
||||
errString: "could not validate an INVALID payload from execution engine",
|
||||
isValidPayload: false,
|
||||
},
|
||||
{
|
||||
name: "altair pre state, altair block",
|
||||
postState: bellatrixState,
|
||||
preState: altairState,
|
||||
blk: altairBlk,
|
||||
name: "altair pre state, altair block",
|
||||
postState: bellatrixState,
|
||||
preState: altairState,
|
||||
blk: altairBlk,
|
||||
isValidPayload: true,
|
||||
},
|
||||
{
|
||||
name: "altair pre state, happy case",
|
||||
@@ -263,13 +297,15 @@ func Test_NotifyNewPayload(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
return b
|
||||
}(),
|
||||
isValidPayload: true,
|
||||
},
|
||||
{
|
||||
name: "could not get merge block",
|
||||
postState: bellatrixState,
|
||||
preState: bellatrixState,
|
||||
blk: bellatrixBlk,
|
||||
errString: "could not get merge block parent hash and total difficulty",
|
||||
name: "could not get merge block",
|
||||
postState: bellatrixState,
|
||||
preState: bellatrixState,
|
||||
blk: bellatrixBlk,
|
||||
errString: "could not get merge block parent hash and total difficulty",
|
||||
isValidPayload: false,
|
||||
},
|
||||
{
|
||||
name: "not at merge transition",
|
||||
@@ -296,13 +332,15 @@ func Test_NotifyNewPayload(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
return b
|
||||
}(),
|
||||
isValidPayload: true,
|
||||
},
|
||||
{
|
||||
name: "could not get merge block",
|
||||
postState: bellatrixState,
|
||||
preState: bellatrixState,
|
||||
blk: bellatrixBlk,
|
||||
errString: "could not get merge block parent hash and total difficulty",
|
||||
name: "could not get merge block",
|
||||
postState: bellatrixState,
|
||||
preState: bellatrixState,
|
||||
blk: bellatrixBlk,
|
||||
errString: "could not get merge block parent hash and total difficulty",
|
||||
isValidPayload: false,
|
||||
},
|
||||
{
|
||||
name: "happy case",
|
||||
@@ -322,20 +360,21 @@ func Test_NotifyNewPayload(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
return b
|
||||
}(),
|
||||
isValidPayload: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
engine := &mockEngineService{newPayloadError: tt.newPayloadErr, blks: map[[32]byte]*v1.ExecutionBlock{}}
|
||||
engine.blks[[32]byte{'a'}] = &v1.ExecutionBlock{
|
||||
e := &mockPOW.EngineClient{ErrNewPayload: tt.newPayloadErr, BlockByHashMap: map[[32]byte]*v1.ExecutionBlock{}}
|
||||
e.BlockByHashMap[[32]byte{'a'}] = &v1.ExecutionBlock{
|
||||
ParentHash: bytesutil.PadTo([]byte{'b'}, fieldparams.RootLength),
|
||||
TotalDifficulty: "0x2",
|
||||
}
|
||||
engine.blks[[32]byte{'b'}] = &v1.ExecutionBlock{
|
||||
e.BlockByHashMap[[32]byte{'b'}] = &v1.ExecutionBlock{
|
||||
ParentHash: bytesutil.PadTo([]byte{'3'}, fieldparams.RootLength),
|
||||
TotalDifficulty: "0x1",
|
||||
}
|
||||
service.cfg.ExecutionEngineCaller = engine
|
||||
service.cfg.ExecutionEngineCaller = e
|
||||
var payload *ethpb.ExecutionPayloadHeader
|
||||
if tt.preState.Version() == version.Bellatrix {
|
||||
payload, err = tt.preState.LatestExecutionPayloadHeader()
|
||||
@@ -345,11 +384,12 @@ func Test_NotifyNewPayload(t *testing.T) {
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 0, root, root, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
postVersion, postHeader, err := getStateVersionAndPayload(tt.postState)
|
||||
require.NoError(t, err)
|
||||
err = service.notifyNewPayload(ctx, tt.preState.Version(), postVersion, payload, postHeader, tt.blk, root)
|
||||
isValidPayload, err := service.notifyNewPayload(ctx, tt.preState.Version(), postVersion, payload, postHeader, tt.blk)
|
||||
if tt.errString != "" {
|
||||
require.ErrorContains(t, tt.errString, err)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tt.isValidPayload, isValidPayload)
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -381,27 +421,23 @@ func Test_NotifyNewPayload_SetOptimisticToValid(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
engine := &mockEngineService{blks: map[[32]byte]*v1.ExecutionBlock{}}
|
||||
engine.blks[[32]byte{'a'}] = &v1.ExecutionBlock{
|
||||
e := &mockPOW.EngineClient{BlockByHashMap: map[[32]byte]*v1.ExecutionBlock{}}
|
||||
e.BlockByHashMap[[32]byte{'a'}] = &v1.ExecutionBlock{
|
||||
ParentHash: bytesutil.PadTo([]byte{'b'}, fieldparams.RootLength),
|
||||
TotalDifficulty: "0x2",
|
||||
}
|
||||
engine.blks[[32]byte{'b'}] = &v1.ExecutionBlock{
|
||||
e.BlockByHashMap[[32]byte{'b'}] = &v1.ExecutionBlock{
|
||||
ParentHash: bytesutil.PadTo([]byte{'3'}, fieldparams.RootLength),
|
||||
TotalDifficulty: "0x1",
|
||||
}
|
||||
service.cfg.ExecutionEngineCaller = engine
|
||||
service.cfg.ExecutionEngineCaller = e
|
||||
payload, err := bellatrixState.LatestExecutionPayloadHeader()
|
||||
require.NoError(t, err)
|
||||
root := [32]byte{'c'}
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.InsertOptimisticBlock(ctx, 1, root, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
postVersion, postHeader, err := getStateVersionAndPayload(bellatrixState)
|
||||
require.NoError(t, err)
|
||||
err = service.notifyNewPayload(ctx, bellatrixState.Version(), postVersion, payload, postHeader, bellatrixBlk, root)
|
||||
validated, err := service.notifyNewPayload(ctx, bellatrixState.Version(), postVersion, payload, postHeader, bellatrixBlk)
|
||||
require.NoError(t, err)
|
||||
optimistic, err := service.IsOptimisticForRoot(ctx, root)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, optimistic)
|
||||
require.Equal(t, true, validated)
|
||||
}
|
||||
|
||||
func Test_IsOptimisticCandidateBlock(t *testing.T) {
|
||||
@@ -441,7 +477,7 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
|
||||
blk := util.NewBeaconBlockBellatrix()
|
||||
blk.Block.Slot = 1
|
||||
blk.Block.ParentRoot = parentRoot[:]
|
||||
wr, err := wrapper.WrappedBellatrixBeaconBlock(blk.Block)
|
||||
wr, err := wrapper.WrappedBeaconBlock(blk.Block)
|
||||
require.NoError(tt, err)
|
||||
return wr
|
||||
}(t),
|
||||
@@ -461,7 +497,7 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
|
||||
blk := util.NewBeaconBlockAltair()
|
||||
blk.Block.Slot = 200
|
||||
blk.Block.ParentRoot = parentRoot[:]
|
||||
wr, err := wrapper.WrappedAltairBeaconBlock(blk.Block)
|
||||
wr, err := wrapper.WrappedBeaconBlock(blk.Block)
|
||||
require.NoError(tt, err)
|
||||
return wr
|
||||
}(t),
|
||||
@@ -481,7 +517,7 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
|
||||
blk := util.NewBeaconBlockBellatrix()
|
||||
blk.Block.Slot = 200
|
||||
blk.Block.ParentRoot = parentRoot[:]
|
||||
wr, err := wrapper.WrappedBellatrixBeaconBlock(blk.Block)
|
||||
wr, err := wrapper.WrappedBeaconBlock(blk.Block)
|
||||
require.NoError(tt, err)
|
||||
return wr
|
||||
}(t),
|
||||
@@ -495,42 +531,14 @@ func Test_IsOptimisticCandidateBlock(t *testing.T) {
|
||||
}(t),
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "shallow block, execution enabled justified chkpt",
|
||||
blk: func(tt *testing.T) block.BeaconBlock {
|
||||
blk := util.NewBeaconBlockBellatrix()
|
||||
blk.Block.Slot = 200
|
||||
blk.Block.ParentRoot = parentRoot[:]
|
||||
wr, err := wrapper.WrappedBellatrixBeaconBlock(blk.Block)
|
||||
require.NoError(tt, err)
|
||||
return wr
|
||||
}(t),
|
||||
justified: func(tt *testing.T) block.SignedBeaconBlock {
|
||||
blk := util.NewBeaconBlockBellatrix()
|
||||
blk.Block.Slot = 32
|
||||
blk.Block.ParentRoot = parentRoot[:]
|
||||
blk.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
blk.Block.Body.ExecutionPayload.FeeRecipient = bytesutil.PadTo([]byte{'a'}, fieldparams.FeeRecipientLength)
|
||||
blk.Block.Body.ExecutionPayload.StateRoot = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
blk.Block.Body.ExecutionPayload.ReceiptsRoot = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
blk.Block.Body.ExecutionPayload.LogsBloom = bytesutil.PadTo([]byte{'a'}, fieldparams.LogsBloomLength)
|
||||
blk.Block.Body.ExecutionPayload.PrevRandao = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
blk.Block.Body.ExecutionPayload.BaseFeePerGas = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
blk.Block.Body.ExecutionPayload.BlockHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
wr, err := wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(tt, err)
|
||||
return wr
|
||||
}(t),
|
||||
want: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
jroot, err := tt.justified.Block().HashTreeRoot()
|
||||
jRoot, err := tt.justified.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, tt.justified))
|
||||
service.store.SetJustifiedCheckpt(
|
||||
&ethpb.Checkpoint{
|
||||
Root: jroot[:],
|
||||
Root: jRoot[:],
|
||||
Epoch: slots.ToEpoch(tt.justified.Block().Slot()),
|
||||
})
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wrappedParentBlock))
|
||||
@@ -569,8 +577,8 @@ func Test_IsOptimisticShallowExecutionParent(t *testing.T) {
|
||||
BlockNumber: 100,
|
||||
}
|
||||
body := &ethpb.BeaconBlockBodyBellatrix{ExecutionPayload: payload}
block := &ethpb.BeaconBlockBellatrix{Body: body, Slot: 200}
rawSigned := &ethpb.SignedBeaconBlockBellatrix{Block: block}
b := &ethpb.BeaconBlockBellatrix{Body: body, Slot: 200}
rawSigned := &ethpb.SignedBeaconBlockBellatrix{Block: b}
|
||||
blk := util.HydrateSignedBeaconBlockBellatrix(rawSigned)
|
||||
wr, err := wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
@@ -589,3 +597,269 @@ func Test_IsOptimisticShallowExecutionParent(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, candidate)
|
||||
}
|
||||
|
||||
func Test_GetPayloadAttribute(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithProposerIdsCache(cache.NewProposerPayloadIDsCache()),
|
||||
}
|
||||
|
||||
// Cache miss
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
hasPayload, _, vId, err := service.getPayloadAttribute(ctx, nil, 0)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, hasPayload)
|
||||
require.Equal(t, types.ValidatorIndex(0), vId)
|
||||
|
||||
// Cache hit, advance state, no fee recipient
|
||||
suggestedVid := types.ValidatorIndex(1)
|
||||
slot := types.Slot(1)
|
||||
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{})
|
||||
st, _ := util.DeterministicGenesisState(t, 1)
|
||||
hook := logTest.NewGlobal()
|
||||
hasPayload, attr, vId, err := service.getPayloadAttribute(ctx, st, slot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, hasPayload)
|
||||
require.Equal(t, suggestedVid, vId)
|
||||
require.Equal(t, fieldparams.EthBurnAddressHex, common.BytesToAddress(attr.SuggestedFeeRecipient).String())
|
||||
require.LogsContain(t, hook, "Fee recipient not set. Using burn address")
|
||||
|
||||
// Cache hit, advance state, has fee recipient
|
||||
suggestedAddr := common.HexToAddress("123")
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveFeeRecipientsByValidatorIDs(ctx, []types.ValidatorIndex{suggestedVid}, []common.Address{suggestedAddr}))
|
||||
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(slot, suggestedVid, [8]byte{})
|
||||
hasPayload, attr, vId, err = service.getPayloadAttribute(ctx, st, slot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, hasPayload)
|
||||
require.Equal(t, suggestedVid, vId)
|
||||
require.Equal(t, suggestedAddr, common.BytesToAddress(attr.SuggestedFeeRecipient))
|
||||
}
|
||||
|
||||
func Test_UpdateLastValidatedCheckpoint(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
params.OverrideBeaconConfig(params.MainnetConfig())
|
||||
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
stateGen := stategen.New(beaconDB)
|
||||
fcs := protoarray.New(0, 0, [32]byte{})
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stateGen),
|
||||
WithForkChoiceStore(fcs),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
genesisStateRoot := [32]byte{}
|
||||
genesisBlk := blocks.NewGenesisBlock(genesisStateRoot[:])
|
||||
wr, err := wrapper.WrappedSignedBeaconBlock(genesisBlk)
|
||||
require.NoError(t, err)
|
||||
assert.NoError(t, beaconDB.SaveBlock(ctx, wr))
|
||||
genesisRoot, err := genesisBlk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
assert.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 0, genesisRoot, params.BeaconConfig().ZeroHash,
|
||||
params.BeaconConfig().ZeroHash, 0, 0))
|
||||
genesisSummary := &ethpb.StateSummary{
|
||||
Root: genesisStateRoot[:],
|
||||
Slot: 0,
|
||||
}
|
||||
require.NoError(t, beaconDB.SaveStateSummary(ctx, genesisSummary))
|
||||
|
||||
// Get last validated checkpoint
|
||||
origCheckpoint, err := service.cfg.BeaconDB.LastValidatedCheckpoint(ctx)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveLastValidatedCheckpoint(ctx, origCheckpoint))
|
||||
|
||||
// Optimistic finalized checkpoint
|
||||
blk := util.NewBeaconBlock()
|
||||
blk.Block.Slot = 320
|
||||
blk.Block.ParentRoot = genesisRoot[:]
|
||||
wr, err = wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wr))
|
||||
opRoot, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
opCheckpoint := &ethpb.Checkpoint{
|
||||
Root: opRoot[:],
|
||||
Epoch: 10,
|
||||
}
|
||||
opStateSummary := &ethpb.StateSummary{
|
||||
Root: opRoot[:],
|
||||
Slot: 320,
|
||||
}
|
||||
require.NoError(t, beaconDB.SaveStateSummary(ctx, opStateSummary))
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 320, opRoot, genesisRoot,
|
||||
params.BeaconConfig().ZeroHash, 10, 10))
|
||||
assert.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, opRoot))
|
||||
require.NoError(t, service.updateFinalized(ctx, opCheckpoint))
|
||||
cp, err := service.cfg.BeaconDB.LastValidatedCheckpoint(ctx)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, origCheckpoint.Root, cp.Root)
|
||||
require.Equal(t, origCheckpoint.Epoch, cp.Epoch)
|
||||
|
||||
// Validated finalized checkpoint
|
||||
blk = util.NewBeaconBlock()
|
||||
blk.Block.Slot = 640
|
||||
blk.Block.ParentRoot = opRoot[:]
|
||||
wr, err = wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(ctx, wr))
|
||||
validRoot, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
validCheckpoint := &ethpb.Checkpoint{
|
||||
Root: validRoot[:],
|
||||
Epoch: 20,
|
||||
}
|
||||
validSummary := &ethpb.StateSummary{
|
||||
Root: validRoot[:],
|
||||
Slot: 640,
|
||||
}
|
||||
require.NoError(t, beaconDB.SaveStateSummary(ctx, validSummary))
|
||||
require.NoError(t, fcs.InsertOptimisticBlock(ctx, 640, validRoot, params.BeaconConfig().ZeroHash,
|
||||
params.BeaconConfig().ZeroHash, 20, 20))
|
||||
require.NoError(t, fcs.SetOptimisticToValid(ctx, validRoot))
|
||||
assert.NoError(t, beaconDB.SaveGenesisBlockRoot(ctx, validRoot))
|
||||
require.NoError(t, service.updateFinalized(ctx, validCheckpoint))
|
||||
cp, err = service.cfg.BeaconDB.LastValidatedCheckpoint(ctx)
|
||||
require.NoError(t, err)
|
||||
|
||||
optimistic, err := service.IsOptimisticForRoot(ctx, validRoot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, optimistic)
|
||||
require.DeepEqual(t, validCheckpoint.Root, cp.Root)
|
||||
require.Equal(t, validCheckpoint.Epoch, cp.Epoch)
|
||||
}
|
||||
|
||||
func TestService_removeInvalidBlockAndState(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
WithForkChoiceStore(protoarray.New(0, 0, [32]byte{})),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Deleting an unknown block should not error.
|
||||
require.NoError(t, service.removeInvalidBlockAndState(ctx, [][32]byte{{'a'}, {'b'}, {'c'}}))
|
||||
|
||||
// Happy case
|
||||
b1 := util.NewBeaconBlock()
|
||||
b1.Block.Slot = 1
|
||||
blk1, err := wrapper.WrappedSignedBeaconBlock(b1)
|
||||
require.NoError(t, err)
|
||||
r1, err := blk1.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, blk1))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{
|
||||
Slot: 1,
|
||||
Root: r1[:],
|
||||
}))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, r1))
|
||||
|
||||
b2 := util.NewBeaconBlock()
|
||||
b2.Block.Slot = 2
|
||||
blk2, err := wrapper.WrappedSignedBeaconBlock(b2)
|
||||
require.NoError(t, err)
|
||||
r2, err := blk2.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, blk2))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveStateSummary(ctx, &ethpb.StateSummary{
|
||||
Slot: 2,
|
||||
Root: r2[:],
|
||||
}))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, r2))
|
||||
|
||||
require.NoError(t, service.removeInvalidBlockAndState(ctx, [][32]byte{r1, r2}))
|
||||
|
||||
require.Equal(t, false, service.hasBlock(ctx, r1))
|
||||
require.Equal(t, false, service.hasBlock(ctx, r2))
|
||||
require.Equal(t, false, service.cfg.BeaconDB.HasStateSummary(ctx, r1))
|
||||
require.Equal(t, false, service.cfg.BeaconDB.HasStateSummary(ctx, r2))
|
||||
has, err := service.cfg.StateGen.HasState(ctx, r1)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, has)
|
||||
has, err = service.cfg.StateGen.HasState(ctx, r2)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, has)
|
||||
}
|
||||
|
||||
func TestService_getFinalizedPayloadHash(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
opts := []Option{
|
||||
WithDatabase(beaconDB),
|
||||
WithStateGen(stategen.New(beaconDB)),
|
||||
}
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
// Use the block in DB
|
||||
b := util.NewBeaconBlockBellatrix()
|
||||
b.Block.Body.ExecutionPayload.BlockHash = bytesutil.PadTo([]byte("hi"), 32)
|
||||
blk, err := wrapper.WrappedSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, blk))
|
||||
h, err := service.getFinalizedPayloadHash(ctx, r)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, bytesutil.ToBytes32(b.Block.Body.ExecutionPayload.BlockHash), h)
|
||||
|
||||
// Use the block in init sync cache
|
||||
b = util.NewBeaconBlockBellatrix()
|
||||
b.Block.Body.ExecutionPayload.BlockHash = bytesutil.PadTo([]byte("hello"), 32)
|
||||
blk, err = wrapper.WrappedSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
r, err = b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
service.initSyncBlocks[r] = blk
|
||||
h, err = service.getFinalizedPayloadHash(ctx, r)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, bytesutil.ToBytes32(b.Block.Body.ExecutionPayload.BlockHash), h)
|
||||
|
||||
// Use the weak subjectivity sync block
|
||||
b = util.NewBeaconBlockBellatrix()
|
||||
b.Block.Body.ExecutionPayload.BlockHash = bytesutil.PadTo([]byte("howdy"), 32)
|
||||
blk, err = wrapper.WrappedSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
r, err = b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, blk))
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveOriginCheckpointBlockRoot(ctx, r))
|
||||
h, err = service.getFinalizedPayloadHash(ctx, r)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, bytesutil.ToBytes32(b.Block.Body.ExecutionPayload.BlockHash), h)
|
||||
|
||||
// None of the above should error
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveOriginCheckpointBlockRoot(ctx, [32]byte{'a'}))
|
||||
_, err = service.getFinalizedPayloadHash(ctx, [32]byte{'a'})
|
||||
require.ErrorContains(t, "does not exist in the db or our cache", err)
|
||||
}
|
||||
|
||||
func TestService_getPayloadHash(t *testing.T) {
|
||||
// Pre-bellatrix
|
||||
blk, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
|
||||
require.NoError(t, err)
|
||||
h, err := getPayloadHash(blk.Block())
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, [32]byte{}, h)
|
||||
|
||||
// Post bellatrix
|
||||
b := util.NewBeaconBlockBellatrix()
|
||||
b.Block.Body.ExecutionPayload.BlockHash = bytesutil.PadTo([]byte("hi"), 32)
|
||||
blk, err = wrapper.WrappedSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
h, err = getPayloadHash(blk.Block())
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, bytesutil.ToBytes32(bytesutil.PadTo([]byte("hi"), 32)), h)
|
||||
}
|
||||
|
||||
@@ -2,6 +2,7 @@ package blockchain
|
||||
|
||||
import (
|
||||
"github.com/prysmaticlabs/prysm/async/event"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/db"
|
||||
@@ -11,7 +12,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/voluntaryexits"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
@@ -52,7 +52,7 @@ func WithChainStartFetcher(f powchain.ChainStartFetcher) Option {
|
||||
}
|
||||
|
||||
// WithExecutionEngineCaller to call execution engine.
|
||||
func WithExecutionEngineCaller(c enginev1.Caller) Option {
|
||||
func WithExecutionEngineCaller(c powchain.EngineCaller) Option {
|
||||
return func(s *Service) error {
|
||||
s.cfg.ExecutionEngineCaller = c
|
||||
return nil
|
||||
@@ -67,6 +67,14 @@ func WithDepositCache(c *depositcache.DepositCache) Option {
|
||||
}
|
||||
}
|
||||
|
||||
// WithProposerIdsCache for proposer id cache.
|
||||
func WithProposerIdsCache(c *cache.ProposerPayloadIDsCache) Option {
|
||||
return func(s *Service) error {
|
||||
s.cfg.ProposerSlotIndexCache = c
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// WithAttestationPool for attestation lifecycle after chain inclusion.
|
||||
func WithAttestationPool(p attestations.Pool) Option {
|
||||
return func(s *Service) error {
|
||||
|
||||
@@ -9,6 +9,7 @@ import (
|
||||
"github.com/holiman/uint256"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
mocks "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
@@ -117,13 +118,13 @@ func Test_validateMergeBlock(t *testing.T) {
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
engine := &mockEngineService{blks: map[[32]byte]*enginev1.ExecutionBlock{}}
|
||||
engine := &mocks.EngineClient{BlockByHashMap: map[[32]byte]*enginev1.ExecutionBlock{}}
|
||||
service.cfg.ExecutionEngineCaller = engine
|
||||
engine.blks[[32]byte{'a'}] = &enginev1.ExecutionBlock{
|
||||
engine.BlockByHashMap[[32]byte{'a'}] = &enginev1.ExecutionBlock{
|
||||
ParentHash: bytesutil.PadTo([]byte{'b'}, fieldparams.RootLength),
|
||||
TotalDifficulty: "0x2",
|
||||
}
|
||||
engine.blks[[32]byte{'b'}] = &enginev1.ExecutionBlock{
|
||||
engine.BlockByHashMap[[32]byte{'b'}] = &enginev1.ExecutionBlock{
|
||||
ParentHash: bytesutil.PadTo([]byte{'3'}, fieldparams.RootLength),
|
||||
TotalDifficulty: "0x1",
|
||||
}
|
||||
@@ -158,12 +159,12 @@ func Test_getBlkParentHashAndTD(t *testing.T) {
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
engine := &mockEngineService{blks: map[[32]byte]*enginev1.ExecutionBlock{}}
|
||||
engine := &mocks.EngineClient{BlockByHashMap: map[[32]byte]*enginev1.ExecutionBlock{}}
|
||||
service.cfg.ExecutionEngineCaller = engine
|
||||
h := [32]byte{'a'}
|
||||
p := [32]byte{'b'}
|
||||
td := "0x1"
|
||||
engine.blks[h] = &enginev1.ExecutionBlock{
|
||||
engine.BlockByHashMap[h] = &enginev1.ExecutionBlock{
|
||||
ParentHash: p[:],
|
||||
TotalDifficulty: td,
|
||||
}
|
||||
@@ -175,18 +176,18 @@ func Test_getBlkParentHashAndTD(t *testing.T) {
|
||||
_, _, err = service.getBlkParentHashAndTD(ctx, []byte{'c'})
|
||||
require.ErrorContains(t, "could not get pow block: block not found", err)
|
||||
|
||||
engine.blks[h] = nil
|
||||
engine.BlockByHashMap[h] = nil
|
||||
_, _, err = service.getBlkParentHashAndTD(ctx, h[:])
|
||||
require.ErrorContains(t, "pow block is nil", err)
|
||||
|
||||
engine.blks[h] = &enginev1.ExecutionBlock{
|
||||
engine.BlockByHashMap[h] = &enginev1.ExecutionBlock{
|
||||
ParentHash: p[:],
|
||||
TotalDifficulty: "1",
|
||||
}
|
||||
_, _, err = service.getBlkParentHashAndTD(ctx, h[:])
|
||||
require.ErrorContains(t, "could not decode merge block total difficulty: hex string without 0x prefix", err)
|
||||
|
||||
engine.blks[h] = &enginev1.ExecutionBlock{
|
||||
engine.BlockByHashMap[h] = &enginev1.ExecutionBlock{
|
||||
ParentHash: p[:],
|
||||
TotalDifficulty: "0XFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF",
|
||||
}
|
||||
|
||||
@@ -6,11 +6,13 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
|
||||
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
coreTime "github.com/prysmaticlabs/prysm/beacon-chain/core/time"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/config/features"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
@@ -107,22 +109,42 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.savePostStateInfo(ctx, blockRoot, signed, postState, false /* reg sync */); err != nil {
|
||||
return err
|
||||
}
|
||||
postStateVersion, postStateHeader, err := getStateVersionAndPayload(postState)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.notifyNewPayload(ctx, preStateVersion, postStateVersion, preStateHeader, postStateHeader, signed, blockRoot); err != nil {
|
||||
isValidPayload, err := s.notifyNewPayload(ctx, preStateVersion, postStateVersion, preStateHeader, postStateHeader, signed)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not verify new payload")
|
||||
}
|
||||
if !isValidPayload {
|
||||
candidate, err := s.optimisticCandidateBlock(ctx, b)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not check if block is optimistic candidate")
|
||||
}
|
||||
if !candidate {
|
||||
return errNotOptimisticCandidate
|
||||
}
|
||||
}
|
||||
|
||||
if err := s.savePostStateInfo(ctx, blockRoot, signed, postState, false /* reg sync */); err != nil {
|
||||
return err
|
||||
}
|
||||
if isValidPayload {
|
||||
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, blockRoot); err != nil {
|
||||
return errors.Wrap(err, "could not set optimistic block to valid")
|
||||
}
|
||||
}
|
||||
|
||||
// We add a proposer score boost to fork choice for the block root if applicable, right after
|
||||
// running a successful state transition for the block.
|
||||
if err := s.cfg.ForkChoiceStore.BoostProposerRoot(
|
||||
ctx, signed.Block().Slot(), blockRoot, s.genesisTime,
|
||||
); err != nil {
|
||||
secondsIntoSlot := uint64(time.Since(s.genesisTime).Seconds()) % params.BeaconConfig().SecondsPerSlot
|
||||
if err := s.cfg.ForkChoiceStore.BoostProposerRoot(ctx, &forkchoicetypes.ProposerBoostRootArgs{
|
||||
BlockRoot: blockRoot,
|
||||
BlockSlot: signed.Block().Slot(),
|
||||
CurrentSlot: slots.SinceGenesis(s.genesisTime),
|
||||
SecondsIntoSlot: secondsIntoSlot,
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
@@ -182,12 +204,24 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
|
||||
msg := fmt.Sprintf("could not read balances for state w/ justified checkpoint %#x", justified.Root)
|
||||
return errors.Wrap(err, msg)
|
||||
}
|
||||
if err := s.updateHead(ctx, balances); err != nil {
|
||||
headRoot, err := s.updateHead(ctx, balances)
|
||||
if err != nil {
|
||||
log.WithError(err).Warn("Could not update head")
|
||||
}
|
||||
if _, err := s.notifyForkchoiceUpdate(ctx, s.headBlock().Block(), s.headRoot(), bytesutil.ToBytes32(finalized.Root)); err != nil {
|
||||
headBlock, err := s.cfg.BeaconDB.Block(ctx, headRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
headState, err := s.cfg.StateGen.StateByRoot(ctx, headRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if _, err := s.notifyForkchoiceUpdate(ctx, headState, headBlock.Block(), headRoot, bytesutil.ToBytes32(finalized.Root)); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.saveHead(ctx, headRoot, headBlock, headState); err != nil {
|
||||
return errors.Wrap(err, "could not save head")
|
||||
}
|
||||
|
||||
if err := s.pruneCanonicalAttsFromPool(ctx, blockRoot, signed); err != nil {
|
||||
return err
|
||||
@@ -232,7 +266,7 @@ func (s *Service) onBlock(ctx context.Context, signed block.SignedBeaconBlock, b
|
||||
if err := s.cfg.ForkChoiceStore.Prune(ctx, fRoot); err != nil {
|
||||
return errors.Wrap(err, "could not prune proto array fork choice nodes")
|
||||
}
|
||||
isOptimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(ctx, fRoot)
|
||||
isOptimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(fRoot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not check if node is optimistically synced")
|
||||
}
|
||||
@@ -370,18 +404,35 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []block.SignedBeaconBlo
|
||||
// blocks have been verified, add them to forkchoice and call the engine
|
||||
for i, b := range blks {
|
||||
s.saveInitSyncBlock(blockRoots[i], b)
|
||||
if err := s.insertBlockToForkChoiceStore(ctx, b.Block(), blockRoots[i], fCheckpoints[i], jCheckpoints[i]); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if err := s.notifyNewPayload(ctx,
|
||||
isValidPayload, err := s.notifyNewPayload(ctx,
|
||||
preVersionAndHeaders[i].version,
|
||||
postVersionAndHeaders[i].version,
|
||||
preVersionAndHeaders[i].header,
|
||||
postVersionAndHeaders[i].header, b, blockRoots[i]); err != nil {
|
||||
postVersionAndHeaders[i].header, b)
|
||||
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if !isValidPayload {
|
||||
candidate, err := s.optimisticCandidateBlock(ctx, b.Block())
|
||||
if err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not check if block is optimistic candidate")
|
||||
}
|
||||
if !candidate {
|
||||
return nil, nil, errNotOptimisticCandidate
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := s.notifyForkchoiceUpdate(ctx, b.Block(), blockRoots[i], bytesutil.ToBytes32(fCheckpoints[i].Root)); err != nil {
|
||||
if err := s.insertBlockToForkChoiceStore(ctx, b.Block(), blockRoots[i], fCheckpoints[i], jCheckpoints[i]); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
if isValidPayload {
|
||||
if err := s.cfg.ForkChoiceStore.SetOptimisticToValid(ctx, blockRoots[i]); err != nil {
|
||||
return nil, nil, errors.Wrap(err, "could not set optimistic block to valid")
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := s.notifyForkchoiceUpdate(ctx, preState, b.Block(), blockRoots[i], bytesutil.ToBytes32(fCheckpoints[i].Root)); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
@@ -455,6 +506,7 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState state.Beaco
|
||||
|
||||
if postState.Slot()+1 == s.nextEpochBoundarySlot {
|
||||
// Update caches for the next epoch at epoch boundary slot - 1.
|
||||
log.Infof("UpdateCommitteeCache from handleEpochBoundary (postState.Slot()+1 == s.nextEpochBoundarySlot), slot=%d, epoch=%d", postState.Slot(), coreTime.CurrentEpoch(postState))
|
||||
if err := helpers.UpdateCommitteeCache(postState, coreTime.NextEpoch(postState)); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -463,6 +515,7 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState state.Beaco
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Info("UpdateProposerIndicesInCache from handleEpochBoundary (postState.Slot()+1 == s.nextEpochBoundarySlot)")
|
||||
if err := helpers.UpdateProposerIndicesInCache(ctx, copied); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -478,9 +531,12 @@ func (s *Service) handleEpochBoundary(ctx context.Context, postState state.Beaco
|
||||
|
||||
// Update caches at epoch boundary slot.
|
||||
// The following updates short-circuit and return nil cheaply if they were already fulfilled during boundary slot - 1.
|
||||
log.Info("UpdateCommitteeCache from handleEpochBoundary (postState.Slot() >= s.nextEpochBoundarySlot)")
|
||||
if err := helpers.UpdateCommitteeCache(postState, coreTime.CurrentEpoch(postState)); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.Info("UpdateProposerIndicesInCache from handleEpochBoundary (postState.Slot() >= s.nextEpochBoundarySlot)")
|
||||
if err := helpers.UpdateProposerIndicesInCache(ctx, postState); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -535,7 +591,7 @@ func (s *Service) insertBlockToForkChoiceStore(ctx context.Context, blk block.Be
|
||||
|
||||
func getBlockPayloadHash(blk block.BeaconBlock) ([32]byte, error) {
|
||||
payloadHash := [32]byte{}
|
||||
if isPreBellatrix(blk.Version()) {
|
||||
if blocks.IsPreBellatrixVersion(blk.Version()) {
|
||||
return payloadHash, nil
|
||||
}
|
||||
payload, err := blk.Body().ExecutionPayload()
|
||||
|
||||
@@ -247,10 +247,19 @@ func (s *Service) updateFinalized(ctx context.Context, cp *ethpb.Checkpoint) err
|
||||
}
|
||||
|
||||
fRoot := bytesutil.ToBytes32(cp.Root)
|
||||
optimistic, err := s.cfg.ForkChoiceStore.IsOptimistic(fRoot)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !optimistic {
|
||||
err = s.cfg.BeaconDB.SaveLastValidatedCheckpoint(ctx, cp)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := s.cfg.StateGen.MigrateToCold(ctx, fRoot); err != nil {
|
||||
return errors.Wrap(err, "could not migrate to cold")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -18,6 +18,7 @@ import (
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
doublylinkedtree "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/doubly-linked-tree"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/protoarray"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
|
||||
@@ -253,7 +254,13 @@ func TestStore_OnBlock_ProposerBoostEarly(t *testing.T) {
|
||||
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.BoostProposerRoot(ctx, 0, [32]byte{'A'}, time.Now()))
|
||||
args := &forkchoicetypes.ProposerBoostRootArgs{
|
||||
BlockRoot: [32]byte{'A'},
|
||||
BlockSlot: types.Slot(0),
|
||||
CurrentSlot: 0,
|
||||
SecondsIntoSlot: 0,
|
||||
}
|
||||
require.NoError(t, service.cfg.ForkChoiceStore.BoostProposerRoot(ctx, args))
|
||||
_, err = service.cfg.ForkChoiceStore.Head(ctx, 0,
|
||||
params.BeaconConfig().ZeroHash, []uint64{}, 0)
|
||||
require.ErrorContains(t, "could not apply proposer boost score: invalid proposer boost root", err)
|
||||
@@ -1621,9 +1628,9 @@ func Test_getStateVersionAndPayload(t *testing.T) {
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
version, header, err := getStateVersionAndPayload(tt.st)
|
||||
ver, header, err := getStateVersionAndPayload(tt.st)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tt.version, version)
|
||||
require.Equal(t, tt.version, ver)
|
||||
require.DeepEqual(t, tt.header, header)
|
||||
})
|
||||
}
|
||||
|
||||
@@ -161,19 +161,19 @@ func (s *Service) spawnProcessAttestationsRoutine(stateFeed *event.Feed) {
|
||||
log.WithError(err).Errorf("Unable to get justified balances for root %v", justified.Root)
|
||||
continue
|
||||
}
|
||||
prevHead := s.headRoot()
|
||||
if err := s.updateHead(s.ctx, balances); err != nil {
|
||||
newHeadRoot, err := s.updateHead(s.ctx, balances)
|
||||
if err != nil {
|
||||
log.WithError(err).Warn("Resolving fork due to new attestation")
|
||||
}
|
||||
s.notifyEngineIfChangedHead(prevHead)
|
||||
s.notifyEngineIfChangedHead(s.ctx, newHeadRoot)
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
// This calls notifyForkchoiceUpdate in the event that the head has changed
|
||||
func (s *Service) notifyEngineIfChangedHead(prevHead [32]byte) {
|
||||
if s.headRoot() == prevHead {
|
||||
func (s *Service) notifyEngineIfChangedHead(ctx context.Context, newHeadRoot [32]byte) {
|
||||
if s.headRoot() == newHeadRoot {
|
||||
return
|
||||
}
|
||||
finalized := s.store.FinalizedCheckpt()
|
||||
@@ -181,14 +181,28 @@ func (s *Service) notifyEngineIfChangedHead(prevHead [32]byte) {
|
||||
log.WithError(errNilFinalizedInStore).Error("could not get finalized checkpoint")
|
||||
return
|
||||
}
|
||||
_, err := s.notifyForkchoiceUpdate(s.ctx,
|
||||
s.headBlock().Block(),
|
||||
s.headRoot(),
|
||||
newHeadBlock, err := s.cfg.BeaconDB.Block(ctx, newHeadRoot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get block from db")
|
||||
return
|
||||
}
|
||||
headState, err := s.cfg.StateGen.StateByRoot(ctx, newHeadRoot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not get state from db")
|
||||
return
|
||||
}
|
||||
_, err = s.notifyForkchoiceUpdate(s.ctx,
|
||||
headState,
|
||||
newHeadBlock.Block(),
|
||||
newHeadRoot,
|
||||
bytesutil.ToBytes32(finalized.Root),
|
||||
)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("could not notify forkchoice update")
|
||||
}
|
||||
if err := s.saveHead(ctx, newHeadRoot, newHeadBlock, headState); err != nil {
|
||||
log.WithError(err).Error("could not save head")
|
||||
}
|
||||
}
|
||||
|
||||
// This processes fork choice attestations from the pool to account for validator votes and fork choice.
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"time"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/transition"
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
@@ -133,31 +134,43 @@ func TestNotifyEngineIfChangedHead(t *testing.T) {
|
||||
|
||||
service, err := NewService(ctx, opts...)
|
||||
require.NoError(t, err)
|
||||
|
||||
service.notifyEngineIfChangedHead(service.headRoot())
|
||||
service.cfg.ProposerSlotIndexCache = cache.NewProposerPayloadIDsCache()
|
||||
service.notifyEngineIfChangedHead(ctx, service.headRoot())
|
||||
hookErr := "could not notify forkchoice update"
|
||||
finalizedErr := "could not get finalized checkpoint"
|
||||
require.LogsDoNotContain(t, hook, finalizedErr)
|
||||
require.LogsDoNotContain(t, hook, hookErr)
|
||||
service.notifyEngineIfChangedHead([32]byte{'a'})
|
||||
service.notifyEngineIfChangedHead(ctx, [32]byte{'a'})
|
||||
require.LogsContain(t, hook, finalizedErr)
|
||||
|
||||
hook.Reset()
|
||||
service.head = &head{
|
||||
root: [32]byte{'a'},
|
||||
block: nil, /* should not panic if notify head uses correct head */
|
||||
}
|
||||
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block.Slot = 1
|
||||
b.Block.Slot = 2
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
r1, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
finalized := &ethpb.Checkpoint{Root: r[:], Epoch: 0}
finalized := &ethpb.Checkpoint{Root: r1[:], Epoch: 0}
|
||||
st, _ := util.DeterministicGenesisState(t, 1)
|
||||
service.head = &head{
|
||||
slot: 1,
|
||||
root: r,
|
||||
root: r1,
|
||||
block: wsb,
|
||||
state: st,
|
||||
}
|
||||
service.cfg.ProposerSlotIndexCache.SetProposerAndPayloadIDs(2, 1, [8]byte{1})
|
||||
service.store.SetFinalizedCheckpt(finalized)
|
||||
service.notifyEngineIfChangedHead([32]byte{'b'})
|
||||
service.notifyEngineIfChangedHead(ctx, r1)
|
||||
require.LogsDoNotContain(t, hook, finalizedErr)
|
||||
require.LogsDoNotContain(t, hook, hookErr)
|
||||
vId, payloadID, has := service.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(2)
|
||||
require.Equal(t, true, has)
|
||||
require.Equal(t, types.ValidatorIndex(1), vId)
|
||||
require.Equal(t, [8]byte{1}, payloadID)
|
||||
}
|
||||
|
||||
@@ -88,7 +88,6 @@ func (s *Service) ReceiveBlockBatch(ctx context.Context, blocks []block.SignedBe
|
||||
|
||||
for i, b := range blocks {
|
||||
blockCopy := b.Copy()
|
||||
// TODO(10261) check optimistic status
|
||||
if err = s.handleBlockAfterBatchVerify(ctx, blockCopy, blkRoots[i], fCheckpoints[i], jCheckpoints[i]); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
|
||||
@@ -29,7 +29,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/voluntaryexits"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
|
||||
enginev1 "github.com/prysmaticlabs/prysm/beacon-chain/powchain/engine-api-client/v1"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
|
||||
@@ -75,6 +74,7 @@ type config struct {
|
||||
ChainStartFetcher powchain.ChainStartFetcher
|
||||
BeaconDB db.HeadAccessDatabase
|
||||
DepositCache *depositcache.DepositCache
|
||||
ProposerSlotIndexCache *cache.ProposerPayloadIDsCache
|
||||
AttPool attestations.Pool
|
||||
ExitPool voluntaryexits.PoolManager
|
||||
SlashingPool slashings.PoolManager
|
||||
@@ -88,7 +88,7 @@ type config struct {
|
||||
WeakSubjectivityCheckpt *ethpb.Checkpoint
|
||||
BlockFetcher powchain.POWBlockFetcher
|
||||
FinalizedStateAtStartUp state.BeaconState
|
||||
ExecutionEngineCaller enginev1.Caller
|
||||
ExecutionEngineCaller powchain.EngineCaller
|
||||
}
|
||||
|
||||
// NewService instantiates a new block service instance that will
|
||||
@@ -191,14 +191,14 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
|
||||
}
|
||||
s.store = store.New(justified, finalized)
|
||||
|
||||
var store f.ForkChoicer
|
||||
var f f.ForkChoicer
|
||||
fRoot := bytesutil.ToBytes32(finalized.Root)
|
||||
if features.Get().EnableForkChoiceDoublyLinkedTree {
|
||||
store = doublylinkedtree.New(justified.Epoch, finalized.Epoch)
|
||||
f = doublylinkedtree.New(justified.Epoch, finalized.Epoch)
|
||||
} else {
|
||||
store = protoarray.New(justified.Epoch, finalized.Epoch, fRoot)
|
||||
f = protoarray.New(justified.Epoch, finalized.Epoch, fRoot)
|
||||
}
|
||||
s.cfg.ForkChoiceStore = store
|
||||
s.cfg.ForkChoiceStore = f
|
||||
fb, err := s.cfg.BeaconDB.Block(s.ctx, s.ensureRootNotZeros(fRoot))
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not get finalized checkpoint block")
|
||||
@@ -211,7 +211,7 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
|
||||
return errors.Wrap(err, "could not get execution payload hash")
|
||||
}
|
||||
fSlot := fb.Block().Slot()
|
||||
if err := store.InsertOptimisticBlock(s.ctx, fSlot, fRoot, params.BeaconConfig().ZeroHash,
|
||||
if err := f.InsertOptimisticBlock(s.ctx, fSlot, fRoot, params.BeaconConfig().ZeroHash,
|
||||
payloadHash, justified.Epoch, finalized.Epoch); err != nil {
|
||||
return errors.Wrap(err, "could not insert finalized block to forkchoice")
|
||||
}
|
||||
@@ -221,7 +221,7 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
|
||||
return errors.Wrap(err, "could not get last validated checkpoint")
|
||||
}
|
||||
if bytes.Equal(finalized.Root, lastValidatedCheckpoint.Root) {
|
||||
if err := store.SetOptimisticToValid(s.ctx, fRoot); err != nil {
|
||||
if err := f.SetOptimisticToValid(s.ctx, fRoot); err != nil {
|
||||
return errors.Wrap(err, "could not set finalized block as validated")
|
||||
}
|
||||
}
|
||||
@@ -257,7 +257,7 @@ func (s *Service) StartFromSavedState(saved state.BeaconState) error {
|
||||
|
||||
func (s *Service) originRootFromSavedState(ctx context.Context) ([32]byte, error) {
|
||||
// first check if we have started from checkpoint sync and have a root
|
||||
originRoot, err := s.cfg.BeaconDB.OriginBlockRoot(ctx)
|
||||
originRoot, err := s.cfg.BeaconDB.OriginCheckpointBlockRoot(ctx)
|
||||
if err == nil {
|
||||
return originRoot, nil
|
||||
}
|
||||
@@ -265,7 +265,7 @@ func (s *Service) originRootFromSavedState(ctx context.Context) ([32]byte, error
|
||||
return originRoot, errors.Wrap(err, "could not retrieve checkpoint sync chain origin data from db")
|
||||
}
|
||||
|
||||
// we got here because OriginBlockRoot gave us an ErrNotFound. this means the node was started from a genesis state,
|
||||
// we got here because OriginCheckpointBlockRoot gave us an ErrNotFound. this means the node was started from a genesis state,
|
||||
// so we should have a value for GenesisBlock
|
||||
genesisBlock, err := s.cfg.BeaconDB.GenesisBlock(ctx)
|
||||
if err != nil {
|
||||
@@ -362,9 +362,9 @@ func (s *Service) startFromPOWChain() error {
|
||||
defer stateSub.Unsubscribe()
|
||||
for {
|
||||
select {
|
||||
case event := <-stateChannel:
|
||||
if event.Type == statefeed.ChainStarted {
|
||||
data, ok := event.Data.(*statefeed.ChainStartedData)
|
||||
case e := <-stateChannel:
|
||||
if e.Type == statefeed.ChainStarted {
|
||||
data, ok := e.Data.(*statefeed.ChainStartedData)
|
||||
if !ok {
|
||||
log.Error("event data is not type *statefeed.ChainStartedData")
|
||||
return
|
||||
@@ -440,9 +440,11 @@ func (s *Service) initializeBeaconChain(
|
||||
s.cfg.ChainStartFetcher.ClearPreGenesisData()
|
||||
|
||||
// Update committee shuffled indices for genesis epoch.
|
||||
log.Infof("UpdateCommitteeCache from initializeBeaconChain, slot=%d", genesisState.Slot())
|
||||
if err := helpers.UpdateCommitteeCache(genesisState, 0 /* genesis epoch */); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
log.Info("UpdateProposerIndicesInCache from initializeBeaconChain")
|
||||
if err := helpers.UpdateProposerIndicesInCache(ctx, genesisState); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -6,7 +6,9 @@ import (
|
||||
"testing"
|
||||
|
||||
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/testing/util"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
@@ -20,8 +22,11 @@ func TestChainService_SaveHead_DataRace(t *testing.T) {
|
||||
s := &Service{
|
||||
cfg: &config{BeaconDB: beaconDB},
|
||||
}
|
||||
b, err := wrapper.WrappedSignedBeaconBlock(util.NewBeaconBlock())
|
||||
st, _ := util.DeterministicGenesisState(t, 1)
|
||||
require.NoError(t, err)
|
||||
go func() {
|
||||
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
|
||||
require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
|
||||
}()
|
||||
require.NoError(t, s.saveHead(context.Background(), [32]byte{}))
|
||||
require.NoError(t, s.saveHead(context.Background(), [32]byte{}, b, st))
|
||||
}
|
||||
|
||||
@@ -24,6 +24,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/operations/attestations"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
|
||||
mockPOW "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
|
||||
v1 "github.com/prysmaticlabs/prysm/beacon-chain/state/v1"
|
||||
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
|
||||
@@ -74,10 +75,14 @@ func (mb *mockBroadcaster) BroadcastSyncCommitteeMessage(_ context.Context, _ ui
|
||||
var _ p2p.Broadcaster = (*mockBroadcaster)(nil)
|
||||
|
||||
func setupBeaconChain(t *testing.T, beaconDB db.Database) *Service {
|
||||
endpoint := "http://127.0.0.1"
|
||||
ctx := context.Background()
|
||||
var web3Service *powchain.Service
|
||||
var err error
|
||||
srv, endpoint, err := mockPOW.SetupRPCServer()
|
||||
require.NoError(t, err)
|
||||
t.Cleanup(func() {
|
||||
srv.Stop()
|
||||
})
|
||||
bState, _ := util.DeterministicGenesisState(t, 10)
|
||||
pbState, err := v1.ProtobufBeaconState(bState.InnerStateUnsafe())
|
||||
require.NoError(t, err)
|
||||
@@ -496,12 +501,12 @@ func TestHasBlock_ForkChoiceAndDB_ProtoArray(t *testing.T) {
|
||||
store: &store.Store{},
|
||||
}
|
||||
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]})
|
||||
block := util.NewBeaconBlock()
|
||||
r, err := block.Block.HashTreeRoot()
|
||||
b := util.NewBeaconBlock()
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
beaconState, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wsb.Block(), r, beaconState))
|
||||
|
||||
@@ -517,12 +522,12 @@ func TestHasBlock_ForkChoiceAndDB_DoublyLinkedTree(t *testing.T) {
|
||||
store: &store.Store{},
|
||||
}
|
||||
s.store.SetFinalizedCheckpt(&ethpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]})
|
||||
block := util.NewBeaconBlock()
|
||||
r, err := block.Block.HashTreeRoot()
|
||||
b := util.NewBeaconBlock()
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
beaconState, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wsb.Block(), r, beaconState))
|
||||
|
||||
@@ -539,10 +544,10 @@ func TestServiceStop_SaveCachedBlocks(t *testing.T) {
|
||||
cancel: cancel,
|
||||
initSyncBlocks: make(map[[32]byte]block.SignedBeaconBlock),
|
||||
}
|
||||
block := util.NewBeaconBlock()
|
||||
r, err := block.Block.HashTreeRoot()
|
||||
b := util.NewBeaconBlock()
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
s.saveInitSyncBlock(r, wsb)
|
||||
require.NoError(t, s.Stop())
|
||||
@@ -569,11 +574,11 @@ func BenchmarkHasBlockDB(b *testing.B) {
|
||||
s := &Service{
|
||||
cfg: &config{BeaconDB: beaconDB},
|
||||
}
|
||||
block := util.NewBeaconBlock()
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
|
||||
blk := util.NewBeaconBlock()
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(b, err)
|
||||
require.NoError(b, s.cfg.BeaconDB.SaveBlock(ctx, wsb))
|
||||
r, err := block.Block.HashTreeRoot()
|
||||
r, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(b, err)
|
||||
|
||||
b.ResetTimer()
|
||||
@@ -613,13 +618,13 @@ func BenchmarkHasBlockForkChoiceStore_DoublyLinkedTree(b *testing.B) {
|
||||
store: &store.Store{},
|
||||
}
|
||||
s.store.SetFinalizedCheckpt(ðpb.Checkpoint{Epoch: 0, Root: params.BeaconConfig().ZeroHash[:]})
|
||||
block := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{}}}
|
||||
r, err := block.Block.HashTreeRoot()
|
||||
blk := ðpb.SignedBeaconBlock{Block: ðpb.BeaconBlock{Body: ðpb.BeaconBlockBody{}}}
|
||||
r, err := blk.Block.HashTreeRoot()
|
||||
require.NoError(b, err)
|
||||
bs := ðpb.BeaconState{FinalizedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)}, CurrentJustifiedCheckpoint: ðpb.Checkpoint{Root: make([]byte, 32)}}
|
||||
beaconState, err := v1.InitializeFromProto(bs)
|
||||
require.NoError(b, err)
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(block)
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(blk)
|
||||
require.NoError(b, err)
|
||||
require.NoError(b, s.insertBlockAndAttestationsToForkChoiceStore(ctx, wsb.Block(), r, beaconState))
|
||||
|
||||
|
||||
@@ -24,7 +24,7 @@ type stateByRooter interface {
|
||||
StateByRoot(context.Context, [32]byte) (state.BeaconState, error)
|
||||
}
|
||||
|
||||
// newStateBalanceCache exists to remind us that stateBalanceCache needs a stagegen
|
||||
// newStateBalanceCache exists to remind us that stateBalanceCache needs a state gen
|
||||
// to avoid nil pointer bugs when updating the cache in the read path (get())
|
||||
func newStateBalanceCache(sg *stategen.State) (*stateBalanceCache, error) {
|
||||
if sg == nil {
|
||||
|
||||
@@ -15,15 +15,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
)
|
||||
|
||||
type mockStateByRoot struct {
|
||||
state state.BeaconState
|
||||
err error
|
||||
}
|
||||
|
||||
func (m *mockStateByRoot) StateByRoot(context.Context, [32]byte) (state.BeaconState, error) {
|
||||
return m.state, m.err
|
||||
}
|
||||
|
||||
type testStateOpt func(*ethpb.BeaconStateAltair)
|
||||
|
||||
func testStateWithValidators(v []*ethpb.Validator) testStateOpt {
|
||||
|
||||
@@ -61,6 +61,7 @@ type ChainService struct {
|
||||
SyncCommitteePubkeys [][]byte
|
||||
Genesis time.Time
|
||||
ForkChoiceStore forkchoice.ForkChoicer
|
||||
ReceiveBlockMockErr error
|
||||
}
|
||||
|
||||
// ForkChoicer mocks the same method in the chain service
|
||||
@@ -195,32 +196,35 @@ func (s *ChainService) ReceiveBlockBatch(ctx context.Context, blks []block.Signe
|
||||
if s.State == nil {
|
||||
return ErrNilState
|
||||
}
|
||||
for _, block := range blks {
|
||||
if !bytes.Equal(s.Root, block.Block().ParentRoot()) {
|
||||
return errors.Errorf("wanted %#x but got %#x", s.Root, block.Block().ParentRoot())
|
||||
for _, b := range blks {
|
||||
if !bytes.Equal(s.Root, b.Block().ParentRoot()) {
|
||||
return errors.Errorf("wanted %#x but got %#x", s.Root, b.Block().ParentRoot())
|
||||
}
|
||||
if err := s.State.SetSlot(block.Block().Slot()); err != nil {
|
||||
if err := s.State.SetSlot(b.Block().Slot()); err != nil {
|
||||
return err
|
||||
}
|
||||
s.BlocksReceived = append(s.BlocksReceived, block)
|
||||
signingRoot, err := block.Block().HashTreeRoot()
|
||||
s.BlocksReceived = append(s.BlocksReceived, b)
|
||||
signingRoot, err := b.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if s.DB != nil {
|
||||
if err := s.DB.SaveBlock(ctx, block); err != nil {
|
||||
if err := s.DB.SaveBlock(ctx, b); err != nil {
|
||||
return err
|
||||
}
|
||||
logrus.Infof("Saved block with root: %#x at slot %d", signingRoot, block.Block().Slot())
|
||||
logrus.Infof("Saved block with root: %#x at slot %d", signingRoot, b.Block().Slot())
|
||||
}
|
||||
s.Root = signingRoot[:]
|
||||
s.Block = block
|
||||
s.Block = b
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// ReceiveBlock mocks ReceiveBlock method in chain service.
|
||||
func (s *ChainService) ReceiveBlock(ctx context.Context, block block.SignedBeaconBlock, _ [32]byte) error {
|
||||
if s.ReceiveBlockMockErr != nil {
|
||||
return s.ReceiveBlockMockErr
|
||||
}
|
||||
if s.State == nil {
|
||||
return ErrNilState
|
||||
}
|
||||
@@ -417,7 +421,7 @@ func (s *ChainService) HeadValidatorIndexToPublicKey(_ context.Context, _ types.
|
||||
}
|
||||
|
||||
// HeadSyncCommitteeIndices mocks HeadSyncCommitteeIndices and always return `HeadNextSyncCommitteeIndices`.
|
||||
func (s *ChainService) HeadSyncCommitteeIndices(_ context.Context, index types.ValidatorIndex, slot types.Slot) ([]types.CommitteeIndex, error) {
|
||||
func (s *ChainService) HeadSyncCommitteeIndices(_ context.Context, _ types.ValidatorIndex, _ types.Slot) ([]types.CommitteeIndex, error) {
|
||||
return s.SyncCommitteeIndices, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -36,6 +36,7 @@ func NewWeakSubjectivityVerifier(wsc *ethpb.Checkpoint, db weakSubjectivityDB) (
|
||||
// per 7342, a nil checkpoint, zero-root or zero-epoch should all fail validation
|
||||
// and return an error instead of creating a WeakSubjectivityVerifier that permits any chain history.
|
||||
if wsc == nil || len(wsc.Root) == 0 || wsc.Epoch == 0 {
|
||||
log.Warn("No valid weak subjectivity checkpoint specified, running without weak subjectivity verification")
|
||||
return &WeakSubjectivityVerifier{
|
||||
enabled: false,
|
||||
}, nil
|
||||
@@ -79,15 +80,17 @@ func (v *WeakSubjectivityVerifier) VerifyWeakSubjectivity(ctx context.Context, f
|
||||
if !v.db.HasBlock(ctx, v.root) {
|
||||
return errors.Wrap(errWSBlockNotFound, fmt.Sprintf("missing root %#x", v.root))
|
||||
}
|
||||
filter := filters.NewFilter().SetStartSlot(v.slot).SetEndSlot(v.slot + params.BeaconConfig().SlotsPerEpoch)
|
||||
endSlot := v.slot + params.BeaconConfig().SlotsPerEpoch
|
||||
filter := filters.NewFilter().SetStartSlot(v.slot).SetEndSlot(endSlot)
|
||||
// A node should have the weak subjectivity block corresponds to the correct epoch in the DB.
|
||||
log.Infof("Searching block roots for weak subjectivity root=%#x, between slots %d-%d", v.root, v.slot, endSlot)
|
||||
roots, err := v.db.BlockRoots(ctx, filter)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error while retrieving block roots to verify weak subjectivity")
|
||||
}
|
||||
for _, root := range roots {
|
||||
if v.root == root {
|
||||
log.Info("Weak subjectivity check has passed")
|
||||
log.Info("Weak subjectivity check has passed!!")
|
||||
v.verified = true
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -14,60 +14,65 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/testing/util"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
)
|
||||
|
||||
func TestService_VerifyWeakSubjectivityRoot(t *testing.T) {
|
||||
beaconDB := testDB.SetupDB(t)
|
||||
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block.Slot = 32
|
||||
b.Block.Slot = 1792480
|
||||
wsb, err := wrapper.WrappedSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, beaconDB.SaveBlock(context.Background(), wsb))
|
||||
r, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
blockEpoch := slots.ToEpoch(b.Block.Slot)
|
||||
tests := []struct {
|
||||
wsVerified bool
|
||||
disabled bool
|
||||
wantErr error
|
||||
checkpt *ethpb.Checkpoint
|
||||
finalizedEpoch types.Epoch
|
||||
name string
|
||||
}{
|
||||
{
|
||||
name: "nil root and epoch",
|
||||
},
|
||||
{
|
||||
name: "already verified",
|
||||
checkpt: ðpb.Checkpoint{Epoch: 2},
|
||||
finalizedEpoch: 2,
|
||||
wsVerified: true,
|
||||
name: "nil root and epoch",
|
||||
disabled: true,
|
||||
},
|
||||
{
|
||||
name: "not yet to verify, ws epoch higher than finalized epoch",
|
||||
checkpt: ðpb.Checkpoint{Epoch: 2},
|
||||
finalizedEpoch: 1,
|
||||
checkpt: ðpb.Checkpoint{Root: bytesutil.PadTo([]byte{'a'}, 32), Epoch: blockEpoch},
|
||||
finalizedEpoch: blockEpoch - 1,
|
||||
},
|
||||
{
|
||||
name: "can't find the block in DB",
|
||||
checkpt: ðpb.Checkpoint{Root: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength), Epoch: 1},
|
||||
finalizedEpoch: 3,
|
||||
finalizedEpoch: blockEpoch + 1,
|
||||
wantErr: errWSBlockNotFound,
|
||||
},
|
||||
{
|
||||
name: "can't find the block corresponds to ws epoch in DB",
|
||||
checkpt: ðpb.Checkpoint{Root: r[:], Epoch: 2}, // Root belongs in epoch 1.
|
||||
finalizedEpoch: 3,
|
||||
checkpt: ðpb.Checkpoint{Root: r[:], Epoch: blockEpoch - 2}, // Root belongs in epoch 1.
|
||||
finalizedEpoch: blockEpoch - 1,
|
||||
wantErr: errWSBlockNotFoundInEpoch,
|
||||
},
|
||||
{
|
||||
name: "can verify and pass",
|
||||
checkpt: ðpb.Checkpoint{Root: r[:], Epoch: 1},
|
||||
finalizedEpoch: 3,
|
||||
checkpt: ðpb.Checkpoint{Root: r[:], Epoch: blockEpoch},
|
||||
finalizedEpoch: blockEpoch + 1,
|
||||
},
|
||||
{
|
||||
name: "equal epoch",
|
||||
checkpt: ðpb.Checkpoint{Root: r[:], Epoch: blockEpoch},
|
||||
finalizedEpoch: blockEpoch,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
wv, err := NewWeakSubjectivityVerifier(tt.checkpt, beaconDB)
|
||||
require.Equal(t, !tt.disabled, wv.enabled)
|
||||
require.NoError(t, err)
|
||||
s := &Service{
|
||||
cfg: &config{BeaconDB: beaconDB, WeakSubjectivityCheckpt: tt.checkpt},
|
||||
|
||||
3
beacon-chain/cache/BUILD.bazel
vendored
3
beacon-chain/cache/BUILD.bazel
vendored
@@ -13,6 +13,7 @@ go_library(
|
||||
"common.go",
|
||||
"doc.go",
|
||||
"error.go",
|
||||
"payload_id.go",
|
||||
"proposer_indices.go",
|
||||
"proposer_indices_disabled.go", # keep
|
||||
"proposer_indices_type.go",
|
||||
@@ -26,6 +27,7 @@ go_library(
|
||||
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/cache",
|
||||
visibility = [
|
||||
"//beacon-chain:__subpackages__",
|
||||
"//testing/spectest:__subpackages__",
|
||||
"//tools:__subpackages__",
|
||||
],
|
||||
deps = [
|
||||
@@ -61,6 +63,7 @@ go_test(
|
||||
"checkpoint_state_test.go",
|
||||
"committee_fuzz_test.go",
|
||||
"committee_test.go",
|
||||
"payload_id_test.go",
|
||||
"proposer_indices_test.go",
|
||||
"skip_slot_cache_test.go",
|
||||
"subnet_ids_test.go",
|
||||
|
||||
73
beacon-chain/cache/payload_id.go
vendored
Normal file
73
beacon-chain/cache/payload_id.go
vendored
Normal file
@@ -0,0 +1,73 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
)
|
||||
|
||||
const vIdLength = 8
|
||||
const pIdLength = 8
|
||||
const vpIdsLength = vIdLength + pIdLength
|
||||
|
||||
// ProposerPayloadIDsCache is a cache of proposer payload IDs.
|
||||
// The key is the slot. The value is the concatenation of the proposer and payload IDs. 8 bytes each.
|
||||
type ProposerPayloadIDsCache struct {
|
||||
slotToProposerAndPayloadIDs map[types.Slot][vpIdsLength]byte
|
||||
sync.RWMutex
|
||||
}
|
||||
|
||||
// NewProposerPayloadIDsCache creates a new proposer payload IDs cache.
|
||||
func NewProposerPayloadIDsCache() *ProposerPayloadIDsCache {
|
||||
return &ProposerPayloadIDsCache{
|
||||
slotToProposerAndPayloadIDs: make(map[types.Slot][vpIdsLength]byte),
|
||||
}
|
||||
}
|
||||
|
||||
// GetProposerPayloadIDs returns the proposer and payload IDs for the given slot.
|
||||
func (f *ProposerPayloadIDsCache) GetProposerPayloadIDs(slot types.Slot) (types.ValidatorIndex, [8]byte, bool) {
|
||||
f.RLock()
|
||||
defer f.RUnlock()
|
||||
ids, ok := f.slotToProposerAndPayloadIDs[slot]
|
||||
if !ok {
|
||||
return 0, [8]byte{}, false
|
||||
}
|
||||
vId := ids[:vIdLength]
|
||||
|
||||
b := ids[vIdLength:]
|
||||
var pId [pIdLength]byte
|
||||
copy(pId[:], b)
|
||||
|
||||
return types.ValidatorIndex(bytesutil.BytesToUint64BigEndian(vId)), pId, true
|
||||
}
|
||||
|
||||
// SetProposerAndPayloadIDs sets the proposer and payload IDs for the given slot.
|
||||
func (f *ProposerPayloadIDsCache) SetProposerAndPayloadIDs(slot types.Slot, vId types.ValidatorIndex, pId [8]byte) {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
var vIdBytes [vIdLength]byte
|
||||
copy(vIdBytes[:], bytesutil.Uint64ToBytesBigEndian(uint64(vId)))
|
||||
|
||||
var bytes [vpIdsLength]byte
|
||||
copy(bytes[:], append(vIdBytes[:], pId[:]...))
|
||||
|
||||
_, ok := f.slotToProposerAndPayloadIDs[slot]
|
||||
// Ok to overwrite if the slot is already set but the payload ID is not set.
|
||||
// This combats the re-org case where payload assignment could change the epoch of.
|
||||
if !ok || (ok && pId != [pIdLength]byte{}) {
|
||||
f.slotToProposerAndPayloadIDs[slot] = bytes
|
||||
}
|
||||
}
|
||||
|
||||
// PrunePayloadIDs removes the payload id entries that's current than input slot.
|
||||
func (f *ProposerPayloadIDsCache) PrunePayloadIDs(slot types.Slot) {
|
||||
f.Lock()
|
||||
defer f.Unlock()
|
||||
|
||||
for s := range f.slotToProposerAndPayloadIDs {
|
||||
if slot > s {
|
||||
delete(f.slotToProposerAndPayloadIDs, s)
|
||||
}
|
||||
}
|
||||
}
|
||||
60
beacon-chain/cache/payload_id_test.go
vendored
Normal file
60
beacon-chain/cache/payload_id_test.go
vendored
Normal file
@@ -0,0 +1,60 @@
|
||||
package cache
|
||||
|
||||
import (
|
||||
"testing"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
)
|
||||
|
||||
func TestValidatorPayloadIDsCache_GetAndSaveValidatorPayloadIDs(t *testing.T) {
|
||||
cache := NewProposerPayloadIDsCache()
|
||||
i, p, ok := cache.GetProposerPayloadIDs(0)
|
||||
require.Equal(t, false, ok)
|
||||
require.Equal(t, types.ValidatorIndex(0), i)
|
||||
require.Equal(t, [pIdLength]byte{}, p)
|
||||
|
||||
slot := types.Slot(1234)
|
||||
vid := types.ValidatorIndex(34234324)
|
||||
pid := [8]byte{1, 2, 3, 3, 7, 8, 7, 8}
|
||||
cache.SetProposerAndPayloadIDs(slot, vid, pid)
|
||||
i, p, ok = cache.GetProposerPayloadIDs(slot)
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, vid, i)
|
||||
require.Equal(t, pid, p)
|
||||
|
||||
slot = types.Slot(9456456)
|
||||
vid = types.ValidatorIndex(6786745)
|
||||
cache.SetProposerAndPayloadIDs(slot, vid, [pIdLength]byte{})
|
||||
i, p, ok = cache.GetProposerPayloadIDs(slot)
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, vid, i)
|
||||
require.Equal(t, [pIdLength]byte{}, p)
|
||||
|
||||
// reset cache without pid
|
||||
slot = types.Slot(9456456)
|
||||
vid = types.ValidatorIndex(11111)
|
||||
pid = [8]byte{3, 2, 3, 33, 72, 8, 7, 8}
|
||||
cache.SetProposerAndPayloadIDs(slot, vid, pid)
|
||||
i, p, ok = cache.GetProposerPayloadIDs(slot)
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, vid, i)
|
||||
require.Equal(t, pid, p)
|
||||
|
||||
// reset cache with existing pid
|
||||
slot = types.Slot(9456456)
|
||||
vid = types.ValidatorIndex(11111)
|
||||
newPid := [8]byte{1, 2, 3, 33, 72, 8, 7, 1}
|
||||
cache.SetProposerAndPayloadIDs(slot, vid, newPid)
|
||||
i, p, ok = cache.GetProposerPayloadIDs(slot)
|
||||
require.Equal(t, true, ok)
|
||||
require.Equal(t, vid, i)
|
||||
require.Equal(t, newPid, p)
|
||||
|
||||
// remove cache entry
|
||||
cache.PrunePayloadIDs(slot + 1)
|
||||
i, p, ok = cache.GetProposerPayloadIDs(slot)
|
||||
require.Equal(t, false, ok)
|
||||
require.Equal(t, types.ValidatorIndex(0), i)
|
||||
require.Equal(t, [pIdLength]byte{}, p)
|
||||
}
|
||||
@@ -45,6 +45,7 @@ go_library(
|
||||
"//proto/prysm/v1alpha1/attestation:go_default_library",
|
||||
"//proto/prysm/v1alpha1/block:go_default_library",
|
||||
"//proto/prysm/v1alpha1/slashings:go_default_library",
|
||||
"//proto/prysm/v1alpha1/wrapper:go_default_library",
|
||||
"//runtime/version:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
|
||||
@@ -2,7 +2,6 @@ package blocks
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"strings"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
|
||||
@@ -14,81 +13,75 @@ import (
|
||||
enginev1 "github.com/prysmaticlabs/prysm/proto/engine/v1"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/block"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/runtime/version"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
)
|
||||
|
||||
// MergeTransitionComplete returns true if the transition to Bellatrix has completed.
|
||||
// IsMergeTransitionComplete returns true if the transition to Bellatrix has completed.
|
||||
// Meaning the payload header in beacon state is not `ExecutionPayloadHeader()` (i.e. not empty).
|
||||
//
|
||||
// Spec code:
|
||||
// def is_merge_transition_complete(state: BeaconState) -> bool:
|
||||
// return state.latest_execution_payload_header != ExecutionPayloadHeader()
|
||||
//
|
||||
// Deprecated: Use `IsMergeTransitionBlockUsingPayloadHeader` instead.
|
||||
func MergeTransitionComplete(st state.BeaconState) (bool, error) {
|
||||
func IsMergeTransitionComplete(st state.BeaconState) (bool, error) {
|
||||
if st == nil {
|
||||
return false, errors.New("nil state")
|
||||
}
|
||||
if IsPreBellatrixVersion(st.Version()) {
|
||||
return false, nil
|
||||
}
|
||||
h, err := st.LatestExecutionPayloadHeader()
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return !isEmptyHeader(h), nil
|
||||
}
|
||||
|
||||
// MergeTransitionBlock returns true if the input block is the terminal merge block.
|
||||
// Meaning the header in beacon state is `ExecutionPayloadHeader()` (i.e. empty).
|
||||
// And the input block has a non-empty header.
|
||||
//
|
||||
// Spec code:
|
||||
// def is_merge_transition_block(state: BeaconState, body: BeaconBlockBody) -> bool:
|
||||
// return not is_merge_transition_complete(state) and body.execution_payload != ExecutionPayload()
|
||||
func MergeTransitionBlock(st state.BeaconState, body block.BeaconBlockBody) (bool, error) {
|
||||
mergeComplete, err := MergeTransitionComplete(st)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if mergeComplete {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return ExecutionBlock(body)
|
||||
}
|
||||
|
||||
// IsMergeTransitionBlockUsingPayloadHeader returns true if the input block is the terminal merge block.
|
||||
// IsMergeTransitionBlockUsingPreStatePayloadHeader returns true if the input block is the terminal merge block.
|
||||
// Terminal merge block must be associated with an empty payload header.
|
||||
// This is an optimized version of MergeTransitionComplete where beacon state is not required as an argument.
|
||||
func IsMergeTransitionBlockUsingPayloadHeader(h *ethpb.ExecutionPayloadHeader, body block.BeaconBlockBody) (bool, error) {
|
||||
// This assumes the header `h` is referenced as the parent state for block body `body.
|
||||
func IsMergeTransitionBlockUsingPreStatePayloadHeader(h *ethpb.ExecutionPayloadHeader, body block.BeaconBlockBody) (bool, error) {
|
||||
if h == nil || body == nil {
|
||||
return false, errors.New("nil header or block body")
|
||||
}
|
||||
if !isEmptyHeader(h) {
|
||||
return false, nil
|
||||
}
|
||||
return ExecutionBlock(body)
|
||||
return IsExecutionBlock(body)
|
||||
}
|
||||
|
||||
// ExecutionBlock returns whether the block has a non-empty ExecutionPayload.
|
||||
// IsExecutionBlock returns whether the block has a non-empty ExecutionPayload.
|
||||
//
|
||||
// Spec code:
|
||||
// def is_execution_block(block: BeaconBlock) -> bool:
|
||||
// return block.body.execution_payload != ExecutionPayload()
|
||||
func ExecutionBlock(body block.BeaconBlockBody) (bool, error) {
|
||||
func IsExecutionBlock(body block.BeaconBlockBody) (bool, error) {
|
||||
if body == nil {
|
||||
return false, errors.New("nil block body")
|
||||
}
|
||||
payload, err := body.ExecutionPayload()
|
||||
if err != nil {
|
||||
if strings.HasPrefix(err.Error(), "ExecutionPayload is not supported in") {
|
||||
return false, nil
|
||||
}
|
||||
switch {
|
||||
case errors.Is(err, wrapper.ErrUnsupportedField):
|
||||
return false, nil
|
||||
case err != nil:
|
||||
return false, err
|
||||
default:
|
||||
}
|
||||
return !isEmptyPayload(payload), nil
|
||||
}
|
||||
|
||||
// ExecutionEnabled returns true if the beacon chain can begin executing.
|
||||
// IsExecutionEnabled returns true if the beacon chain can begin executing.
|
||||
// Meaning the payload header is beacon state is non-empty or the payload in block body is non-empty.
|
||||
//
|
||||
// Spec code:
|
||||
// def is_execution_enabled(state: BeaconState, body: BeaconBlockBody) -> bool:
|
||||
// return is_merge_block(state, body) or is_merge_complete(state)
|
||||
// Deprecated: Use `IsExecutionEnabledUsingHeader` instead.
|
||||
func ExecutionEnabled(st state.BeaconState, body block.BeaconBlockBody) (bool, error) {
|
||||
if st.Version() == version.Phase0 || st.Version() == version.Altair {
|
||||
func IsExecutionEnabled(st state.BeaconState, body block.BeaconBlockBody) (bool, error) {
|
||||
if st == nil || body == nil {
|
||||
return false, errors.New("nil state or block body")
|
||||
}
|
||||
if IsPreBellatrixVersion(st.Version()) {
|
||||
return false, nil
|
||||
}
|
||||
header, err := st.LatestExecutionPayloadHeader()
|
||||
@@ -99,12 +92,17 @@ func ExecutionEnabled(st state.BeaconState, body block.BeaconBlockBody) (bool, e
|
||||
}
|
||||
|
||||
// IsExecutionEnabledUsingHeader returns true if the execution is enabled using post processed payload header and block body.
|
||||
// This is an optimized version of ExecutionEnabled where beacon state is not required as an argument.
|
||||
// This is an optimized version of IsExecutionEnabled where beacon state is not required as an argument.
|
||||
func IsExecutionEnabledUsingHeader(header *ethpb.ExecutionPayloadHeader, body block.BeaconBlockBody) (bool, error) {
|
||||
if !isEmptyHeader(header) {
|
||||
return true, nil
|
||||
}
|
||||
return ExecutionBlock(body)
|
||||
return IsExecutionBlock(body)
|
||||
}
|
||||
|
||||
// IsPreBellatrixVersion returns true if input version is before bellatrix fork.
|
||||
func IsPreBellatrixVersion(v int) bool {
|
||||
return v < version.Bellatrix
|
||||
}
|
||||
|
||||
// ValidatePayloadWhenMergeCompletes validates if payload is valid versus input beacon state.
|
||||
@@ -115,7 +113,7 @@ func IsExecutionEnabledUsingHeader(header *ethpb.ExecutionPayloadHeader, body bl
|
||||
// if is_merge_complete(state):
|
||||
// assert payload.parent_hash == state.latest_execution_payload_header.block_hash
|
||||
func ValidatePayloadWhenMergeCompletes(st state.BeaconState, payload *enginev1.ExecutionPayload) error {
|
||||
complete, err := MergeTransitionComplete(st)
|
||||
complete, err := IsMergeTransitionComplete(st)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -237,6 +235,9 @@ func PayloadToHeader(payload *enginev1.ExecutionPayload) (*ethpb.ExecutionPayloa
|
||||
}
|
||||
|
||||
func isEmptyPayload(p *enginev1.ExecutionPayload) bool {
|
||||
if p == nil {
|
||||
return true
|
||||
}
|
||||
if !bytes.Equal(p.ParentHash, make([]byte, fieldparams.RootLength)) {
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -18,7 +18,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
)
|
||||
|
||||
func Test_MergeComplete(t *testing.T) {
|
||||
func Test_IsMergeComplete(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
payload *ethpb.ExecutionPayloadHeader
|
||||
@@ -160,7 +160,7 @@ func Test_MergeComplete(t *testing.T) {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
|
||||
require.NoError(t, st.SetLatestExecutionPayloadHeader(tt.payload))
|
||||
got, err := blocks.MergeTransitionComplete(st)
|
||||
got, err := blocks.IsMergeTransitionComplete(st)
|
||||
require.NoError(t, err)
|
||||
if got != tt.want {
|
||||
t.Errorf("mergeComplete() got = %v, want %v", got, tt.want)
|
||||
@@ -169,187 +169,6 @@ func Test_MergeComplete(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func Test_MergeBlock(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
payload *enginev1.ExecutionPayload
|
||||
header *ethpb.ExecutionPayloadHeader
|
||||
want bool
|
||||
}{
|
||||
{
|
||||
name: "empty header, empty payload",
|
||||
payload: emptyPayload(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "non-empty header, empty payload",
|
||||
payload: emptyPayload(),
|
||||
header: func() *ethpb.ExecutionPayloadHeader {
|
||||
h := emptyPayloadHeader()
|
||||
h.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return h
|
||||
}(),
|
||||
want: false,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has parent hash",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has fee recipient",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.FeeRecipient = bytesutil.PadTo([]byte{'a'}, fieldparams.FeeRecipientLength)
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has state root",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.StateRoot = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has receipt root",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.ReceiptsRoot = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has logs bloom",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.LogsBloom = bytesutil.PadTo([]byte{'a'}, fieldparams.LogsBloomLength)
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has random",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.PrevRandao = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has base fee",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.BaseFeePerGas = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has block hash",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.BlockHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has tx",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.Transactions = [][]byte{{'a'}}
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has extra data",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.ExtraData = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has block number",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.BlockNumber = 1
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has gas limit",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.GasLimit = 1
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has gas used",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.GasUsed = 1
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
{
|
||||
name: "empty header, payload has timestamp",
|
||||
payload: func() *enginev1.ExecutionPayload {
|
||||
p := emptyPayload()
|
||||
p.Timestamp = 1
|
||||
return p
|
||||
}(),
|
||||
header: emptyPayloadHeader(),
|
||||
want: true,
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
st, _ := util.DeterministicGenesisStateBellatrix(t, 1)
|
||||
require.NoError(t, st.SetLatestExecutionPayloadHeader(tt.header))
|
||||
blk := util.NewBeaconBlockBellatrix()
|
||||
blk.Block.Body.ExecutionPayload = tt.payload
|
||||
body, err := wrapper.WrappedBellatrixBeaconBlockBody(blk.Block.Body)
|
||||
require.NoError(t, err)
|
||||
got, err := blocks.MergeTransitionBlock(st, body)
|
||||
require.NoError(t, err)
|
||||
if got != tt.want {
|
||||
t.Errorf("MergeTransitionBlock() got = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_IsMergeTransitionBlockUsingPayloadHeader(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
@@ -520,7 +339,7 @@ func Test_IsMergeTransitionBlockUsingPayloadHeader(t *testing.T) {
|
||||
blk.Block.Body.ExecutionPayload = tt.payload
|
||||
body, err := wrapper.WrappedBellatrixBeaconBlockBody(blk.Block.Body)
|
||||
require.NoError(t, err)
|
||||
got, err := blocks.IsMergeTransitionBlockUsingPayloadHeader(tt.header, body)
|
||||
got, err := blocks.IsMergeTransitionBlockUsingPreStatePayloadHeader(tt.header, body)
|
||||
require.NoError(t, err)
|
||||
if got != tt.want {
|
||||
t.Errorf("MergeTransitionBlock() got = %v, want %v", got, tt.want)
|
||||
@@ -556,14 +375,14 @@ func Test_IsExecutionBlock(t *testing.T) {
|
||||
blk.Block.Body.ExecutionPayload = tt.payload
|
||||
wrappedBlock, err := wrapper.WrappedBellatrixBeaconBlock(blk.Block)
|
||||
require.NoError(t, err)
|
||||
got, err := blocks.ExecutionBlock(wrappedBlock.Body())
|
||||
got, err := blocks.IsExecutionBlock(wrappedBlock.Body())
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tt.want, got)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func Test_ExecutionEnabled(t *testing.T) {
|
||||
func Test_IsExecutionEnabled(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
payload *enginev1.ExecutionPayload
|
||||
@@ -630,10 +449,10 @@ func Test_ExecutionEnabled(t *testing.T) {
|
||||
if tt.useAltairSt {
|
||||
st, _ = util.DeterministicGenesisStateAltair(t, 1)
|
||||
}
|
||||
got, err := blocks.ExecutionEnabled(st, body)
|
||||
got, err := blocks.IsExecutionEnabled(st, body)
|
||||
require.NoError(t, err)
|
||||
if got != tt.want {
|
||||
t.Errorf("ExecutionEnabled() got = %v, want %v", got, tt.want)
|
||||
t.Errorf("IsExecutionEnabled() got = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -696,7 +515,7 @@ func Test_IsExecutionEnabledUsingHeader(t *testing.T) {
|
||||
got, err := blocks.IsExecutionEnabledUsingHeader(tt.header, body)
|
||||
require.NoError(t, err)
|
||||
if got != tt.want {
|
||||
t.Errorf("ExecutionEnabled() got = %v, want %v", got, tt.want)
|
||||
t.Errorf("IsExecutionEnabled() got = %v, want %v", got, tt.want)
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -904,7 +723,7 @@ func BenchmarkBellatrixComplete(b *testing.B) {
|
||||
|
||||
b.ResetTimer()
|
||||
for i := 0; i < b.N; i++ {
|
||||
_, err := blocks.MergeTransitionComplete(st)
|
||||
_, err := blocks.IsMergeTransitionComplete(st)
|
||||
require.NoError(b, err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
log "github.com/sirupsen/logrus"
|
||||
"sort"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
@@ -175,9 +176,7 @@ func CommitteeAssignments(
|
||||
return nil, nil, err
|
||||
}
|
||||
proposerIndexToSlots := make(map[types.ValidatorIndex][]types.Slot, params.BeaconConfig().SlotsPerEpoch)
|
||||
// Proposal epochs do not have a look ahead, so we skip them over here.
|
||||
validProposalEpoch := epoch < nextEpoch
|
||||
for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch && validProposalEpoch; slot++ {
|
||||
for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch; slot++ {
|
||||
// Skip proposer assignment for genesis slot.
|
||||
if slot == 0 {
|
||||
continue
|
||||
@@ -192,6 +191,15 @@ func CommitteeAssignments(
|
||||
proposerIndexToSlots[i] = append(proposerIndexToSlots[i], slot)
|
||||
}
|
||||
|
||||
// If previous proposer indices computation is outside if current proposal epoch range,
|
||||
// we need to reset state slot back to start slot so that we can compute the correct committees.
|
||||
currentProposalEpoch := epoch < nextEpoch
|
||||
if !currentProposalEpoch {
|
||||
if err := state.SetSlot(state.Slot() - params.BeaconConfig().SlotsPerEpoch); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
}
|
||||
|
||||
activeValidatorIndices, err := ActiveValidatorIndices(ctx, state, epoch)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
@@ -279,40 +287,43 @@ func ShuffledIndices(s state.ReadOnlyBeaconState, epoch types.Epoch) ([]types.Va
|
||||
// UpdateCommitteeCache gets called at the beginning of every epoch to cache the committee shuffled indices
|
||||
// list with committee index and epoch number. It caches the shuffled indices for current epoch and next epoch.
|
||||
func UpdateCommitteeCache(state state.ReadOnlyBeaconState, epoch types.Epoch) error {
|
||||
for _, e := range []types.Epoch{epoch, epoch + 1} {
|
||||
seed, err := Seed(state, e, params.BeaconConfig().DomainBeaconAttester)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if committeeCache.HasEntry(string(seed[:])) {
|
||||
return nil
|
||||
}
|
||||
|
||||
shuffledIndices, err := ShuffledIndices(state, e)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
count := SlotCommitteeCount(uint64(len(shuffledIndices)))
|
||||
|
||||
// Store the sorted indices as well as shuffled indices. In current spec,
|
||||
// sorted indices is required to retrieve proposer index. This is also
|
||||
// used for failing verify signature fallback.
|
||||
sortedIndices := make([]types.ValidatorIndex, len(shuffledIndices))
|
||||
copy(sortedIndices, shuffledIndices)
|
||||
sort.Slice(sortedIndices, func(i, j int) bool {
|
||||
return sortedIndices[i] < sortedIndices[j]
|
||||
})
|
||||
|
||||
if err := committeeCache.AddCommitteeShuffledList(&cache.Committees{
|
||||
ShuffledIndices: shuffledIndices,
|
||||
CommitteeCount: uint64(params.BeaconConfig().SlotsPerEpoch.Mul(count)),
|
||||
Seed: seed,
|
||||
SortedIndices: sortedIndices,
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
//for _, e := range []types.Epoch{epoch, epoch + 1} {
|
||||
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Infof("computed seed=%#x for slot=%d", seed, state.Slot())
|
||||
if committeeCache.HasEntry(string(seed[:])) {
|
||||
log.Infof("UpdateCommitteeCache: seed=%#x already in cache at slot=%d", seed, state.Slot())
|
||||
return nil
|
||||
}
|
||||
|
||||
shuffledIndices, err := ShuffledIndices(state, epoch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
count := SlotCommitteeCount(uint64(len(shuffledIndices)))
|
||||
|
||||
// Store the sorted indices as well as shuffled indices. In current spec,
|
||||
// sorted indices is required to retrieve proposer index. This is also
|
||||
// used for failing verify signature fallback.
|
||||
sortedIndices := make([]types.ValidatorIndex, len(shuffledIndices))
|
||||
copy(sortedIndices, shuffledIndices)
|
||||
sort.Slice(sortedIndices, func(i, j int) bool {
|
||||
return sortedIndices[i] < sortedIndices[j]
|
||||
})
|
||||
|
||||
log.Infof("UpdateCommitteeCache: epoch=%d, state.slot=%d, indices=%v, seed=%#x", epoch, state.Slot(), sortedIndices, seed)
|
||||
if err := committeeCache.AddCommitteeShuffledList(&cache.Committees{
|
||||
ShuffledIndices: shuffledIndices,
|
||||
CommitteeCount: uint64(params.BeaconConfig().SlotsPerEpoch.Mul(count)),
|
||||
Seed: seed,
|
||||
SortedIndices: sortedIndices,
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
//}
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -356,6 +367,7 @@ func UpdateProposerIndicesInCache(ctx context.Context, state state.ReadOnlyBeaco
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
log.Infof("UpdateProposerIndicesInCache: state.slot=%d, slot=%d, root=%#x, indices=%v", state.Slot(), s, r, indices)
|
||||
return proposerIndicesCache.AddProposerIndices(&cache.ProposerIndices{
|
||||
BlockRoot: bytesutil.ToBytes32(r),
|
||||
ProposerIndices: proposerIndices,
|
||||
|
||||
@@ -232,7 +232,7 @@ func TestCommitteeAssignments_CannotRetrieveFuture(t *testing.T) {
|
||||
|
||||
_, proposerIndxs, err = CommitteeAssignments(context.Background(), state, time.CurrentEpoch(state)+1)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(proposerIndxs), "wanted empty proposer index set")
|
||||
require.NotEqual(t, 0, len(proposerIndxs), "wanted non-zero proposer index set")
|
||||
}
|
||||
|
||||
func TestCommitteeAssignments_EverySlotHasMin1Proposer(t *testing.T) {
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/crypto/bls"
|
||||
"github.com/prysmaticlabs/prysm/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// Seed returns the randao seed used for shuffling of a given epoch.
|
||||
@@ -33,6 +34,8 @@ func Seed(state state.ReadOnlyBeaconState, epoch types.Epoch, domain [bls.Domain
|
||||
|
||||
seed32 := hash.Hash(seed)
|
||||
|
||||
log.Infof("seed computation params for slot=%d: domain=%#x, epoch=%d, randaoMix=%#x", state.Slot(), domain, epoch, randaoMix)
|
||||
|
||||
return seed32, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -3,6 +3,8 @@ package helpers
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
@@ -15,7 +17,6 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/crypto/hash"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
log "github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
@@ -88,11 +89,27 @@ func ActiveValidatorIndices(ctx context.Context, s state.ReadOnlyBeaconState, ep
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not get seed")
|
||||
}
|
||||
var ci []types.ValidatorIndex
|
||||
if s.Slot() == 78 {
|
||||
if err := s.ReadFromEveryValidator(func(idx int, val state.ReadOnlyValidator) error {
|
||||
if IsActiveValidatorUsingTrie(val, epoch) {
|
||||
ci = append(ci, types.ValidatorIndex(idx))
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
log.Errorf("got error doing double-check validator index computation=%v", err)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
activeIndices, err := committeeCache.ActiveIndices(ctx, seed)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not interface with committee cache")
|
||||
}
|
||||
if activeIndices != nil {
|
||||
if s.Slot() == 78 {
|
||||
log.Infof("double check indices for 78, len=%d, low=%d, high=%d", len(ci), ci[0], ci[len(ci)-1])
|
||||
}
|
||||
log.Infof("found indices in cache for slot=%d, len=%d, low=%d, high=%d", s.Slot(), len(activeIndices), activeIndices[0], activeIndices[len(activeIndices)-1])
|
||||
return activeIndices, nil
|
||||
}
|
||||
|
||||
@@ -106,6 +123,7 @@ func ActiveValidatorIndices(ctx context.Context, s state.ReadOnlyBeaconState, ep
|
||||
return nil, errors.New("nil active indices")
|
||||
}
|
||||
CommitteeCacheInProgressHit.Inc()
|
||||
log.Infof("found indices in in-progress cache for slot=%d, len=%d, low=%d, high=%d", s.Slot(), len(activeIndices), activeIndices[0], activeIndices[len(activeIndices)-1])
|
||||
return activeIndices, nil
|
||||
}
|
||||
return nil, errors.Wrap(err, "could not mark committee cache as in progress")
|
||||
@@ -126,9 +144,16 @@ func ActiveValidatorIndices(ctx context.Context, s state.ReadOnlyBeaconState, ep
|
||||
return nil, err
|
||||
}
|
||||
|
||||
log.Infof("computed indices slot=%d, len=%d, low=%d, high=%d", s.Slot(), len(indices), indices[0], indices[len(indices)-1])
|
||||
log.Infof("UpdateCommitteeCache from ActiveValidatorIndices, slot=%d", s.Slot())
|
||||
if err := UpdateCommitteeCache(s, epoch); err != nil {
|
||||
return nil, errors.Wrap(err, "could not update committee cache")
|
||||
}
|
||||
/*
|
||||
if err := UpdateProposerIndicesInCache(ctx, s); err != nil {
|
||||
return nil, errors.Wrap(err, "failed to update proposer indices cache in ActiveValidatorIndices")
|
||||
}
|
||||
*/
|
||||
|
||||
return indices, nil
|
||||
}
|
||||
@@ -175,6 +200,7 @@ func ActiveValidatorCount(ctx context.Context, s state.ReadOnlyBeaconState, epoc
|
||||
return 0, err
|
||||
}
|
||||
|
||||
log.Infof("UpdateCommitteeCache from ActiveValidatorCount, slot=%d", s.Slot())
|
||||
if err := UpdateCommitteeCache(s, epoch); err != nil {
|
||||
return 0, errors.Wrap(err, "could not update committee cache")
|
||||
}
|
||||
@@ -249,6 +275,7 @@ func BeaconProposerIndex(ctx context.Context, state state.ReadOnlyBeaconState) (
|
||||
}
|
||||
return proposerIndices[state.Slot()%params.BeaconConfig().SlotsPerEpoch], nil
|
||||
}
|
||||
log.Info("UpdateProposerIndicesInCache from BeaconProposerIndex")
|
||||
if err := UpdateProposerIndicesInCache(ctx, state); err != nil {
|
||||
return 0, errors.Wrap(err, "could not update committee cache")
|
||||
}
|
||||
@@ -259,15 +286,19 @@ func BeaconProposerIndex(ctx context.Context, state state.ReadOnlyBeaconState) (
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "could not generate seed")
|
||||
}
|
||||
fmt.Printf("BeaconProposerIndex:seed=%#x", seed)
|
||||
|
||||
seedWithSlot := append(seed[:], bytesutil.Bytes8(uint64(state.Slot()))...)
|
||||
fmt.Printf("BeaconProposerIndex:seedWithSlot=%#x", seed)
|
||||
seedWithSlotHash := hash.Hash(seedWithSlot)
|
||||
fmt.Printf("BeaconProposerIndex:seedWithSlotHash=%#x", seed)
|
||||
|
||||
indices, err := ActiveValidatorIndices(ctx, state, e)
|
||||
if err != nil {
|
||||
return 0, errors.Wrap(err, "could not get active indices")
|
||||
}
|
||||
|
||||
log.Infof("validator index length=%d, low=%d, high=%d", len(indices), indices[0], indices[len(indices)-1])
|
||||
return ComputeProposerIndex(state, indices, seedWithSlotHash)
|
||||
}
|
||||
|
||||
|
||||
@@ -138,6 +138,7 @@ func ProcessSlotsUsingNextSlotCache(
|
||||
ctx, span := trace.StartSpan(ctx, "core.state.ProcessSlotsUsingNextSlotCache")
|
||||
defer span.End()
|
||||
|
||||
/*
|
||||
// Check whether the parent state has been advanced by 1 slot in next slot cache.
|
||||
nextSlotState, err := NextSlotState(ctx, parentRoot)
|
||||
if err != nil {
|
||||
@@ -148,6 +149,11 @@ func ProcessSlotsUsingNextSlotCache(
|
||||
// We replace next slot state with parent state.
|
||||
if cachedStateExists {
|
||||
parentState = nextSlotState
|
||||
root, err := parentState.HashTreeRoot(ctx)
|
||||
if err != nil {
|
||||
log.Errorf("weird, got an error calling HTR for the state=%v where root should be=%#x", err, parentRoot)
|
||||
}
|
||||
log.Infof("found state in NextSlotCache at slot=%d with root=%#x (parentRoot=%#x)", parentState.Slot(), root, parentRoot)
|
||||
}
|
||||
|
||||
// In the event our cached state has advanced our
|
||||
@@ -155,9 +161,12 @@ func ProcessSlotsUsingNextSlotCache(
|
||||
if cachedStateExists && parentState.Slot() == slot {
|
||||
return parentState, nil
|
||||
}
|
||||
|
||||
*/
|
||||
log.Infof("process_slots being called up to slot=%d where state.slot=%d", slot, parentState.Slot())
|
||||
// Since next slot cache only advances state by 1 slot,
|
||||
// we check if there's more slots that need to process.
|
||||
parentState, err = ProcessSlots(ctx, parentState, slot)
|
||||
parentState, err := ProcessSlots(ctx, parentState, slot)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process slots")
|
||||
}
|
||||
|
||||
@@ -282,23 +282,25 @@ func ProcessBlockForStateRoot(
|
||||
state, err = b.ProcessBlockHeaderNoVerify(ctx, state, blk.Slot(), blk.ProposerIndex(), blk.ParentRoot(), bodyRoot[:])
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
return nil, errors.Wrap(err, "could not process block header")
|
||||
r, err := signed.Block().HashTreeRoot()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process block header, also failed to compute its htr")
|
||||
}
|
||||
return nil, errors.Wrapf(err, "could not process block header, state slot=%d, root=%#x", state.Slot(), r)
|
||||
}
|
||||
|
||||
if state.Version() == version.Bellatrix {
|
||||
enabled, err := b.ExecutionEnabled(state, blk.Body())
|
||||
enabled, err := b.IsExecutionEnabled(state, blk.Body())
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not check if execution is enabled")
|
||||
}
|
||||
if enabled {
|
||||
payload, err := blk.Body().ExecutionPayload()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not check if execution is enabled")
|
||||
return nil, err
|
||||
}
|
||||
if enabled {
|
||||
payload, err := blk.Body().ExecutionPayload()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
state, err = b.ProcessPayload(state, payload)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process execution payload")
|
||||
}
|
||||
state, err = b.ProcessPayload(state, payload)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not process execution payload")
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -13,3 +13,9 @@ var ErrNotFoundState = kv.ErrNotFoundState
|
||||
|
||||
// ErrNotFoundOriginBlockRoot wraps ErrNotFound for an error specific to the origin block root.
|
||||
var ErrNotFoundOriginBlockRoot = kv.ErrNotFoundOriginBlockRoot
|
||||
|
||||
// ErrNotFoundOriginBlockRoot wraps ErrNotFound for an error specific to the origin block root.
|
||||
var ErrNotFoundBackfillBlockRoot = kv.ErrNotFoundBackfillBlockRoot
|
||||
|
||||
// ErrNotFoundGenesisBlockRoot means no genesis block root was found, indicating the db was not initialized with genesis
|
||||
var ErrNotFoundGenesisBlockRoot = kv.ErrNotFoundGenesisBlockRoot
|
||||
|
||||
@@ -27,6 +27,7 @@ type ReadOnlyDatabase interface {
|
||||
BlockRootsBySlot(ctx context.Context, slot types.Slot) (bool, [][32]byte, error)
|
||||
HasBlock(ctx context.Context, blockRoot [32]byte) bool
|
||||
GenesisBlock(ctx context.Context) (block.SignedBeaconBlock, error)
|
||||
GenesisBlockRoot(ctx context.Context) ([32]byte, error)
|
||||
IsFinalizedBlock(ctx context.Context, blockRoot [32]byte) bool
|
||||
FinalizedChildBlock(ctx context.Context, blockRoot [32]byte) (block.SignedBeaconBlock, error)
|
||||
HighestSlotBlocksBelow(ctx context.Context, slot types.Slot) ([]block.SignedBeaconBlock, error)
|
||||
@@ -53,7 +54,8 @@ type ReadOnlyDatabase interface {
|
||||
// Fee reicipients operations.
|
||||
FeeRecipientByValidatorID(ctx context.Context, id types.ValidatorIndex) (common.Address, error)
|
||||
// origin checkpoint sync support
|
||||
OriginBlockRoot(ctx context.Context) ([32]byte, error)
|
||||
OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error)
|
||||
BackfillBlockRoot(ctx context.Context) ([32]byte, error)
|
||||
}
|
||||
|
||||
// NoHeadAccessDatabase defines a struct without access to chain head data.
|
||||
@@ -97,12 +99,14 @@ type HeadAccessDatabase interface {
|
||||
SaveHeadBlockRoot(ctx context.Context, blockRoot [32]byte) error
|
||||
|
||||
// Genesis operations.
|
||||
LoadGenesis(ctx context.Context, r io.Reader) error
|
||||
LoadGenesis(ctx context.Context, stateBytes []byte) error
|
||||
SaveGenesisData(ctx context.Context, state state.BeaconState) error
|
||||
EnsureEmbeddedGenesis(ctx context.Context) error
|
||||
|
||||
// initialization method needed for origin checkpoint sync
|
||||
SaveOrigin(ctx context.Context, state io.Reader, block io.Reader) error
|
||||
SaveOrigin(ctx context.Context, serState, serBlock []byte) error
|
||||
SaveOriginCheckpointBlockRoot(ctx context.Context, blockRoot [32]byte) error
|
||||
SaveBackfillBlockRoot(ctx context.Context, blockRoot [32]byte) error
|
||||
}
|
||||
|
||||
// SlasherDatabase interface for persisting data related to detecting slashable offenses on Ethereum.
|
||||
|
||||
@@ -94,6 +94,7 @@ go_test(
|
||||
"state_test.go",
|
||||
"utils_test.go",
|
||||
"validated_checkpoint_test.go",
|
||||
"wss_test.go",
|
||||
],
|
||||
data = glob(["testdata/**"]),
|
||||
embed = [":go_default_library"],
|
||||
@@ -101,6 +102,7 @@ go_test(
|
||||
"//beacon-chain/db/filters:go_default_library",
|
||||
"//beacon-chain/db/iface:go_default_library",
|
||||
"//beacon-chain/state:go_default_library",
|
||||
"//beacon-chain/state/genesis:go_default_library",
|
||||
"//beacon-chain/state/v1:go_default_library",
|
||||
"//beacon-chain/state/v2:go_default_library",
|
||||
"//config/features:go_default_library",
|
||||
|
||||
@@ -48,18 +48,18 @@ func (s *Store) Block(ctx context.Context, blockRoot [32]byte) (block.SignedBeac
|
||||
return blk, err
|
||||
}
|
||||
|
||||
// OriginBlockRoot returns the value written to the db in SaveOriginBlockRoot
|
||||
// OriginCheckpointBlockRoot returns the value written to the db in SaveOriginCheckpointBlockRoot
|
||||
// This is the root of a finalized block within the weak subjectivity period
|
||||
// at the time the chain was started, used to initialize the database and chain
|
||||
// without syncing from genesis.
|
||||
func (s *Store) OriginBlockRoot(ctx context.Context) ([32]byte, error) {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.OriginBlockRoot")
|
||||
func (s *Store) OriginCheckpointBlockRoot(ctx context.Context) ([32]byte, error) {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.OriginCheckpointBlockRoot")
|
||||
defer span.End()
|
||||
|
||||
var root [32]byte
|
||||
err := s.db.View(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket(blocksBucket)
|
||||
rootSlice := bkt.Get(originBlockRootKey)
|
||||
rootSlice := bkt.Get(originCheckpointBlockRootKey)
|
||||
if rootSlice == nil {
|
||||
return ErrNotFoundOriginBlockRoot
|
||||
}
|
||||
@@ -70,6 +70,25 @@ func (s *Store) OriginBlockRoot(ctx context.Context) ([32]byte, error) {
|
||||
return root, err
|
||||
}
|
||||
|
||||
// BackfillBlockRoot keeps track of the highest block available before the OriginCheckpointBlockRoot
|
||||
func (s *Store) BackfillBlockRoot(ctx context.Context) ([32]byte, error) {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.BackfillBlockRoot")
|
||||
defer span.End()
|
||||
|
||||
var root [32]byte
|
||||
err := s.db.View(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket(blocksBucket)
|
||||
rootSlice := bkt.Get(backfillBlockRootKey)
|
||||
if len(rootSlice) == 0 {
|
||||
return ErrNotFoundBackfillBlockRoot
|
||||
}
|
||||
root = bytesutil.ToBytes32(rootSlice)
|
||||
return nil
|
||||
})
|
||||
|
||||
return root, err
|
||||
}
|
||||
|
||||
// HeadBlock returns the latest canonical block in the Ethereum Beacon Chain.
|
||||
func (s *Store) HeadBlock(ctx context.Context) (block.SignedBeaconBlock, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.HeadBlock")
|
||||
@@ -214,13 +233,17 @@ func (s *Store) DeleteBlock(ctx context.Context, root [32]byte) error {
|
||||
defer span.End()
|
||||
|
||||
if err := s.DeleteState(ctx, root); err != nil {
|
||||
return errDeleteFinalized
|
||||
return err
|
||||
}
|
||||
|
||||
if err := s.deleteStateSummary(root); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
return s.db.Update(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket(finalizedBlockRootsIndexBucket)
|
||||
if b := bkt.Get(root[:]); b != nil {
|
||||
return errDeleteFinalized
|
||||
return ErrDeleteJustifiedAndFinalized
|
||||
}
|
||||
|
||||
if err := tx.Bucket(blocksBucket).Delete(root[:]); err != nil {
|
||||
@@ -325,6 +348,22 @@ func (s *Store) GenesisBlock(ctx context.Context) (block.SignedBeaconBlock, erro
|
||||
return blk, err
|
||||
}
|
||||
|
||||
func (s *Store) GenesisBlockRoot(ctx context.Context) ([32]byte, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "BeaconDB.GenesisBlockRoot")
|
||||
defer span.End()
|
||||
var root [32]byte
|
||||
err := s.db.View(func(tx *bolt.Tx) error {
|
||||
bkt := tx.Bucket(blocksBucket)
|
||||
r := bkt.Get(genesisBlockRootKey)
|
||||
if len(r) == 0 {
|
||||
return ErrNotFoundGenesisBlockRoot
|
||||
}
|
||||
root = bytesutil.ToBytes32(r)
|
||||
return nil
|
||||
})
|
||||
return root, err
|
||||
}
|
||||
|
||||
// SaveGenesisBlockRoot to the db.
|
||||
func (s *Store) SaveGenesisBlockRoot(ctx context.Context, blockRoot [32]byte) error {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.SaveGenesisBlockRoot")
|
||||
@@ -335,16 +374,27 @@ func (s *Store) SaveGenesisBlockRoot(ctx context.Context, blockRoot [32]byte) er
|
||||
})
|
||||
}
|
||||
|
||||
// SaveOriginBlockRoot is used to keep track of the block root used for origin sync.
|
||||
// SaveOriginCheckpointBlockRoot is used to keep track of the block root used for syncing from a checkpoint origin.
|
||||
// This should be a finalized block from within the current weak subjectivity period.
|
||||
// This value is used by a running beacon chain node to locate the state at the beginning
|
||||
// of the chain history, in places where genesis would typically be used.
|
||||
func (s *Store) SaveOriginBlockRoot(ctx context.Context, blockRoot [32]byte) error {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.SaveOriginBlockRoot")
|
||||
func (s *Store) SaveOriginCheckpointBlockRoot(ctx context.Context, blockRoot [32]byte) error {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.SaveOriginCheckpointBlockRoot")
|
||||
defer span.End()
|
||||
return s.db.Update(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket(blocksBucket)
|
||||
return bucket.Put(originBlockRootKey, blockRoot[:])
|
||||
return bucket.Put(originCheckpointBlockRootKey, blockRoot[:])
|
||||
})
|
||||
}
|
||||
|
||||
// SaveBackfillBlockRoot is used to keep track of the most recently backfilled block root when
|
||||
// the node was initialized via checkpoint sync.
|
||||
func (s *Store) SaveBackfillBlockRoot(ctx context.Context, blockRoot [32]byte) error {
|
||||
_, span := trace.StartSpan(ctx, "BeaconDB.SaveBackfillBlockRoot")
|
||||
defer span.End()
|
||||
return s.db.Update(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket(blocksBucket)
|
||||
return bucket.Put(backfillBlockRootKey, blockRoot[:])
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -58,6 +58,23 @@ var blockTests = []struct {
|
||||
},
|
||||
}
|
||||
|
||||
func TestStore_SaveBackfillBlockRoot(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
_, err := db.BackfillBlockRoot(ctx)
|
||||
require.ErrorIs(t, err, ErrNotFoundBackfillBlockRoot)
|
||||
|
||||
expected := [32]byte{}
|
||||
copy(expected[:], []byte{0x23})
|
||||
err = db.SaveBackfillBlockRoot(ctx, expected)
|
||||
require.NoError(t, err)
|
||||
actual, err := db.BackfillBlockRoot(ctx)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, expected, actual)
|
||||
|
||||
}
|
||||
|
||||
func TestStore_SaveBlock_NoDuplicates(t *testing.T) {
|
||||
BlockCacheSize = 1
|
||||
slot := types.Slot(20)
|
||||
@@ -174,6 +191,16 @@ func TestStore_DeleteBlock(t *testing.T) {
|
||||
require.NoError(t, db.SaveGenesisBlockRoot(ctx, genesisBlockRoot))
|
||||
blks := makeBlocks(t, 0, slotsPerEpoch*4, genesisBlockRoot)
|
||||
require.NoError(t, db.SaveBlocks(ctx, blks))
|
||||
ss := make([]*ethpb.StateSummary, len(blks))
|
||||
for i, blk := range blks {
|
||||
r, err := blk.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
ss[i] = ðpb.StateSummary{
|
||||
Slot: blk.Block().Slot(),
|
||||
Root: r[:],
|
||||
}
|
||||
}
|
||||
require.NoError(t, db.SaveStateSummaries(ctx, ss))
|
||||
|
||||
root, err := blks[slotsPerEpoch].Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
@@ -199,11 +226,50 @@ func TestStore_DeleteBlock(t *testing.T) {
|
||||
b, err = db.Block(ctx, root2)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, b, nil)
|
||||
require.Equal(t, false, db.HasStateSummary(ctx, root2))
|
||||
|
||||
require.ErrorIs(t, db.DeleteBlock(ctx, root), errDeleteFinalized)
|
||||
|
||||
require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteJustifiedAndFinalized)
|
||||
}
|
||||
|
||||
func TestStore_DeleteJustifiedBlock(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
ctx := context.Background()
|
||||
b := util.NewBeaconBlock()
|
||||
b.Block.Slot = 1
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
cp := ðpb.Checkpoint{
|
||||
Root: root[:],
|
||||
}
|
||||
st, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
blk, err := wrapper.WrappedSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveBlock(ctx, blk))
|
||||
require.NoError(t, db.SaveState(ctx, st, root))
|
||||
require.NoError(t, db.SaveJustifiedCheckpoint(ctx, cp))
|
||||
require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteJustifiedAndFinalized)
|
||||
}
|
||||
|
||||
func TestStore_DeleteFinalizedBlock(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
ctx := context.Background()
|
||||
b := util.NewBeaconBlock()
|
||||
root, err := b.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
cp := ðpb.Checkpoint{
|
||||
Root: root[:],
|
||||
}
|
||||
st, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
blk, err := wrapper.WrappedSignedBeaconBlock(b)
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveBlock(ctx, blk))
|
||||
require.NoError(t, db.SaveState(ctx, st, root))
|
||||
require.NoError(t, db.SaveGenesisBlockRoot(ctx, root))
|
||||
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, cp))
|
||||
require.ErrorIs(t, db.DeleteBlock(ctx, root), ErrDeleteJustifiedAndFinalized)
|
||||
}
|
||||
func TestStore_GenesisBlock(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
ctx := context.Background()
|
||||
|
||||
@@ -2,8 +2,8 @@ package kv
|
||||
|
||||
import "github.com/pkg/errors"
|
||||
|
||||
// errDeleteFinalized is raised when we attempt to delete a finalized block/state
|
||||
var errDeleteFinalized = errors.New("cannot delete finalized block or state")
|
||||
// ErrDeleteJustifiedAndFinalized is raised when we attempt to delete a justified or finalized block/state
|
||||
var ErrDeleteJustifiedAndFinalized = errors.New("cannot delete finalized block or state")
|
||||
|
||||
// ErrNotFound can be used directly, or as a wrapped DBError, whenever a db method needs to
|
||||
// indicate that a value couldn't be found.
|
||||
@@ -13,5 +13,11 @@ var ErrNotFoundState = errors.Wrap(ErrNotFound, "state not found")
|
||||
// ErrNotFoundOriginBlockRoot is an error specifically for the origin block root getter
|
||||
var ErrNotFoundOriginBlockRoot = errors.Wrap(ErrNotFound, "OriginBlockRoot")
|
||||
|
||||
// ErrNotFoundGenesisBlockRoot means no genesis block root was found, indicating the db was not initialized with genesis
|
||||
var ErrNotFoundGenesisBlockRoot = errors.Wrap(ErrNotFound, "OriginGenesisRoot")
|
||||
|
||||
// ErrNotFoundBackfillBlockRoot is an error specifically for the backfill block root getter
|
||||
var ErrNotFoundBackfillBlockRoot = errors.Wrap(ErrNotFound, "BackfillBlockRoot")
|
||||
|
||||
// ErrNotFoundFeeRecipient is a not found error specifically for the fee recipient getter
|
||||
var ErrNotFoundFeeRecipient = errors.Wrap(ErrNotFound, "fee recipient")
|
||||
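Since every sentinel in this family wraps ErrNotFound, callers can match either the specific or the generic error with errors.Is, which is exactly how SaveOrigin below checks for a missing genesis root. A small illustrative sketch (db and ctx are hypothetical caller variables):

if _, err := db.GenesisBlockRoot(ctx); errors.Is(err, ErrNotFoundGenesisBlockRoot) {
	// The db was never initialized with genesis; load genesis before
	// attempting checkpoint sync (see SaveOrigin further down).
}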
|
||||
@@ -23,7 +23,7 @@ var previousFinalizedCheckpointKey = []byte("previous-finalized-checkpoint")
|
||||
var containerFinalizedButNotCanonical = []byte("recent block needs reindexing to determine canonical")
|
||||
|
||||
// The finalized block roots index tracks beacon blocks which are finalized in the canonical chain.
|
||||
// The finalized checkpoint contains the the epoch which was finalized and the highest beacon block
|
||||
// The finalized checkpoint contains the epoch which was finalized and the highest beacon block
|
||||
// root where block.slot <= start_slot(epoch). As a result, we cannot index the finalized canonical
|
||||
// beacon block chain using the finalized root alone as this would exclude all other blocks in the
|
||||
// finalized epoch from being indexed as "final and canonical".
|
||||
@@ -32,7 +32,7 @@ var containerFinalizedButNotCanonical = []byte("recent block needs reindexing to
|
||||
// - De-index all finalized beacon block roots from previous_finalized_epoch to
|
||||
// new_finalized_epoch. (I.e. delete these roots from the index, to be re-indexed.)
|
||||
// - Build the canonical finalized chain by walking up the ancestry chain from the finalized block
|
||||
// root until a parent is found in the index or the parent is genesis.
|
||||
// root until a parent is found in the index, or the parent is genesis or the origin checkpoint.
|
||||
// - Add all block roots in the database where epoch(block.slot) == checkpoint.epoch.
|
||||
//
|
||||
// This method ensures that all blocks from the current finalized epoch are considered "final" while
|
||||
@@ -46,6 +46,7 @@ func (s *Store) updateFinalizedBlockRoots(ctx context.Context, tx *bolt.Tx, chec
|
||||
root := checkpoint.Root
|
||||
var previousRoot []byte
|
||||
genesisRoot := tx.Bucket(blocksBucket).Get(genesisBlockRootKey)
|
||||
initCheckpointRoot := tx.Bucket(blocksBucket).Get(originCheckpointBlockRootKey)
|
||||
|
||||
// De-index recent finalized block roots, to be re-indexed.
|
||||
previousFinalizedCheckpoint := ðpb.Checkpoint{}
|
||||
@@ -104,6 +105,12 @@ func (s *Store) updateFinalizedBlockRoots(ctx context.Context, tx *bolt.Tx, chec
|
||||
return err
|
||||
}
|
||||
|
||||
// breaking here allows the initial checkpoint root to be correctly inserted,
|
||||
// but stops the loop from trying to search for its parent.
|
||||
if bytes.Equal(root, initCheckpointRoot) {
|
||||
break
|
||||
}
|
||||
|
||||
// Found parent, loop exit condition.
|
||||
if parentBytes := bkt.Get(block.ParentRoot()); parentBytes != nil {
|
||||
parent := ðpb.FinalizedBlockRootContainer{}
|
||||
|
||||
@@ -4,8 +4,6 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
|
||||
@@ -54,14 +52,10 @@ func (s *Store) SaveGenesisData(ctx context.Context, genesisState state.BeaconSt
|
||||
return nil
|
||||
}
|
||||
|
||||
// LoadGenesis loads a genesis state from a given file path, if no genesis exists already.
|
||||
func (s *Store) LoadGenesis(ctx context.Context, r io.Reader) error {
|
||||
b, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// LoadGenesis loads a genesis state from a ssz-serialized byte slice, if no genesis exists already.
|
||||
func (s *Store) LoadGenesis(ctx context.Context, sb []byte) error {
|
||||
st := ðpb.BeaconState{}
|
||||
if err := st.UnmarshalSSZ(b); err != nil {
|
||||
if err := st.UnmarshalSSZ(sb); err != nil {
|
||||
return err
|
||||
}
|
||||
gs, err := statev1.InitializeFromProtoUnsafe(st)
|
||||
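With this change LoadGenesis takes the raw ssz-encoded state bytes instead of an io.Reader, so callers read the file (or embedded asset) themselves. A minimal usage sketch, matching the updated tests below (the path, db and ctx variables are hypothetical):

sb, err := os.ReadFile("/path/to/genesis.ssz") // hypothetical path
if err != nil {
	return err
}
if err := db.LoadGenesis(ctx, sb); err != nil {
	return err
}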
|
||||
@@ -53,21 +53,16 @@ func TestLoadGenesisFromFile(t *testing.T) {
|
||||
if err == nil {
|
||||
fp = rfp
|
||||
}
|
||||
|
||||
r, err := os.Open(fp)
|
||||
sb, err := os.ReadFile(fp)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
assert.NoError(t, r.Close())
|
||||
}()
|
||||
|
||||
db := setupDB(t)
|
||||
assert.NoError(t, db.LoadGenesis(context.Background(), r))
|
||||
assert.NoError(t, db.LoadGenesis(context.Background(), sb))
|
||||
testGenesisDataSaved(t, db)
|
||||
|
||||
// Loading the same genesis again should not throw an error
|
||||
_, err = r.Seek(0, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, db.LoadGenesis(context.Background(), r))
|
||||
assert.NoError(t, db.LoadGenesis(context.Background(), sb))
|
||||
}
|
||||
|
||||
func TestLoadGenesisFromFile_mismatchedForkVersion(t *testing.T) {
|
||||
@@ -76,15 +71,12 @@ func TestLoadGenesisFromFile_mismatchedForkVersion(t *testing.T) {
|
||||
if err == nil {
|
||||
fp = rfp
|
||||
}
|
||||
r, err := os.Open(fp)
|
||||
sb, err := os.ReadFile(fp)
|
||||
assert.NoError(t, err)
|
||||
defer func() {
|
||||
assert.NoError(t, r.Close())
|
||||
}()
|
||||
|
||||
// Loading a genesis with the wrong fork version as beacon config should throw an error.
|
||||
db := setupDB(t)
|
||||
assert.ErrorContains(t, "does not match config genesis fork version", db.LoadGenesis(context.Background(), r))
|
||||
assert.ErrorContains(t, "does not match config genesis fork version", db.LoadGenesis(context.Background(), sb))
|
||||
}
|
||||
|
||||
func TestEnsureEmbeddedGenesis(t *testing.T) {
|
||||
|
||||
@@ -51,7 +51,9 @@ var (
|
||||
altairKey = []byte("altair")
|
||||
bellatrixKey = []byte("merge")
|
||||
// block root included in the beacon state used by weak subjectivity initial sync
|
||||
originBlockRootKey = []byte("origin-block-root")
|
||||
originCheckpointBlockRootKey = []byte("origin-checkpoint-block-root")
|
||||
// block root tracking the progress of backfill, or pointing at genesis if backfill has not been initiated
|
||||
backfillBlockRootKey = []byte("backfill-block-root")
|
||||
|
||||
// Deprecated: This index key was migrated in PR 6461. Do not use, except for migrations.
|
||||
lastArchivedIndexKey = []byte("last-archived")
|
||||
|
||||
@@ -346,19 +346,31 @@ func (s *Store) DeleteState(ctx context.Context, blockRoot [32]byte) error {
|
||||
|
||||
bkt = tx.Bucket(checkpointBucket)
|
||||
enc := bkt.Get(finalizedCheckpointKey)
|
||||
checkpoint := ðpb.Checkpoint{}
|
||||
finalized := ðpb.Checkpoint{}
|
||||
if enc == nil {
|
||||
checkpoint = ðpb.Checkpoint{Root: genesisBlockRoot}
|
||||
} else if err := decode(ctx, enc, checkpoint); err != nil {
|
||||
finalized = ðpb.Checkpoint{Root: genesisBlockRoot}
|
||||
} else if err := decode(ctx, enc, finalized); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
enc = bkt.Get(justifiedCheckpointKey)
|
||||
justified := ðpb.Checkpoint{}
|
||||
if enc == nil {
|
||||
justified = ðpb.Checkpoint{Root: genesisBlockRoot}
|
||||
} else if err := decode(ctx, enc, justified); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
blockBkt := tx.Bucket(blocksBucket)
|
||||
headBlkRoot := blockBkt.Get(headBlockRootKey)
|
||||
bkt = tx.Bucket(stateBucket)
|
||||
// Safe guard against deleting genesis, finalized, head state.
|
||||
if bytes.Equal(blockRoot[:], checkpoint.Root) || bytes.Equal(blockRoot[:], genesisBlockRoot) || bytes.Equal(blockRoot[:], headBlkRoot) {
|
||||
return errors.New("cannot delete genesis, finalized, or head state")
|
||||
// Safeguard against deleting the genesis, finalized, or justified state.
|
||||
if bytes.Equal(blockRoot[:], finalized.Root) || bytes.Equal(blockRoot[:], genesisBlockRoot) || bytes.Equal(blockRoot[:], justified.Root) {
|
||||
return ErrDeleteJustifiedAndFinalized
|
||||
}
|
||||
|
||||
// Nothing to delete if state doesn't exist.
|
||||
enc = bkt.Get(blockRoot[:])
|
||||
if enc == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
slot, err := s.slotByBlockRoot(ctx, tx, blockRoot[:])
|
||||
|
||||
@@ -110,3 +110,12 @@ func (s *Store) saveCachedStateSummariesDB(ctx context.Context) error {
|
||||
s.stateSummaryCache.clear()
|
||||
return nil
|
||||
}
|
||||
|
||||
// deleteStateSummary deletes a state summary object from the cache and the db using the input block root.
|
||||
func (s *Store) deleteStateSummary(blockRoot [32]byte) error {
|
||||
s.stateSummaryCache.delete(blockRoot)
|
||||
return s.db.Update(func(tx *bolt.Tx) error {
|
||||
bucket := tx.Bucket(stateSummaryBucket)
|
||||
return bucket.Delete(blockRoot[:])
|
||||
})
|
||||
}
|
||||
|
||||
@@ -37,6 +37,13 @@ func (c *stateSummaryCache) has(r [32]byte) bool {
|
||||
return ok
|
||||
}
|
||||
|
||||
// delete removes a state summary from the cache.
|
||||
func (c *stateSummaryCache) delete(r [32]byte) {
|
||||
c.initSyncStateSummariesLock.Lock()
|
||||
defer c.initSyncStateSummariesLock.Unlock()
|
||||
delete(c.initSyncStateSummaries, r)
|
||||
}
|
||||
|
||||
// get retrieves a state summary from the initial sync state summaries cache using the root of
|
||||
// the block.
|
||||
func (c *stateSummaryCache) get(r [32]byte) *ethpb.StateSummary {
|
||||
|
||||
@@ -62,3 +62,17 @@ func TestStateSummary_CacheToDB(t *testing.T) {
|
||||
require.Equal(t, true, db.HasStateSummary(context.Background(), bytesutil.ToBytes32(r)))
|
||||
}
|
||||
}
|
||||
|
||||
func TestStateSummary_CanDelete(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
ctx := context.Background()
|
||||
r1 := bytesutil.ToBytes32([]byte{'A'})
|
||||
s1 := ðpb.StateSummary{Slot: 1, Root: r1[:]}
|
||||
|
||||
require.Equal(t, false, db.HasStateSummary(ctx, r1), "State summary should not be saved")
|
||||
require.NoError(t, db.SaveStateSummary(ctx, s1))
|
||||
require.Equal(t, true, db.HasStateSummary(ctx, r1), "State summary should be saved")
|
||||
|
||||
require.NoError(t, db.deleteStateSummary(r1))
|
||||
require.Equal(t, false, db.HasStateSummary(ctx, r1), "State summary should not be saved")
|
||||
}
|
||||
|
||||
@@ -412,7 +412,7 @@ func TestStore_DeleteGenesisState(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, st.SetSlot(100))
|
||||
require.NoError(t, db.SaveState(ctx, st, genesisBlockRoot))
|
||||
wantedErr := "cannot delete genesis, finalized, or head state"
|
||||
wantedErr := "cannot delete finalized block or state"
|
||||
assert.ErrorContains(t, wantedErr, db.DeleteState(ctx, genesisBlockRoot))
|
||||
}
|
||||
|
||||
@@ -440,7 +440,7 @@ func TestStore_DeleteFinalizedState(t *testing.T) {
|
||||
require.NoError(t, db.SaveState(ctx, finalizedState, finalizedBlockRoot))
|
||||
finalizedCheckpoint := ðpb.Checkpoint{Root: finalizedBlockRoot[:]}
|
||||
require.NoError(t, db.SaveFinalizedCheckpoint(ctx, finalizedCheckpoint))
|
||||
wantedErr := "cannot delete genesis, finalized, or head state"
|
||||
wantedErr := "cannot delete finalized block or state"
|
||||
assert.ErrorContains(t, wantedErr, db.DeleteState(ctx, finalizedBlockRoot))
|
||||
}
|
||||
|
||||
@@ -465,8 +465,7 @@ func TestStore_DeleteHeadState(t *testing.T) {
|
||||
require.NoError(t, st.SetSlot(100))
|
||||
require.NoError(t, db.SaveState(ctx, st, headBlockRoot))
|
||||
require.NoError(t, db.SaveHeadBlockRoot(ctx, headBlockRoot))
|
||||
wantedErr := "cannot delete genesis, finalized, or head state"
|
||||
assert.ErrorContains(t, wantedErr, db.DeleteState(ctx, headBlockRoot))
|
||||
require.NoError(t, db.DeleteState(ctx, headBlockRoot)) // Ok to delete head state if it's optimistic.
|
||||
}
|
||||
|
||||
func TestStore_SaveDeleteState_CanGetHighestBelow(t *testing.T) {
|
||||
|
||||
@@ -2,74 +2,87 @@ package kv
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/ssz/detect"
|
||||
ethpb "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/prysmaticlabs/prysm/runtime/version"
|
||||
)
|
||||
|
||||
// SaveOrigin loads an ssz serialized Block & BeaconState from an io.Reader
|
||||
// (ex: an open file) prepares the database so that the beacon node can begin
|
||||
// syncing, using the provided values as their point of origin. This is an alternative
|
||||
// to syncing from genesis, and should only be run on an empty database.
|
||||
func (s *Store) SaveOrigin(ctx context.Context, stateReader, blockReader io.Reader) error {
|
||||
sb, err := ioutil.ReadAll(stateReader)
|
||||
func (s *Store) SaveOrigin(ctx context.Context, serState, serBlock []byte) error {
|
||||
genesisRoot, err := s.GenesisBlockRoot(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to read origin state bytes")
|
||||
if errors.Is(err, ErrNotFoundGenesisBlockRoot) {
|
||||
return errors.Wrap(err, "genesis block root not found: genesis must be provided for checkpoint sync")
|
||||
}
|
||||
return errors.Wrap(err, "genesis block root query error: checkpoint sync must verify genesis to proceed")
|
||||
}
|
||||
bb, err := ioutil.ReadAll(blockReader)
|
||||
err = s.SaveBackfillBlockRoot(ctx, genesisRoot)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "error reading block given to SaveOrigin")
|
||||
return errors.Wrap(err, "unable to save genesis root as initial backfill starting point for checkpoint sync")
|
||||
}
|
||||
|
||||
cf, err := detect.FromState(sb)
|
||||
cf, err := detect.FromState(serState)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to detect config and fork for origin state")
|
||||
return errors.Wrap(err, "could not sniff config+fork for origin state bytes")
|
||||
}
|
||||
bs, err := cf.UnmarshalBeaconState(sb)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not unmarshal origin state")
|
||||
}
|
||||
wblk, err := cf.UnmarshalBeaconBlock(bb)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "unable to unmarshal origin SignedBeaconBlock")
|
||||
_, ok := params.BeaconConfig().ForkVersionSchedule[cf.Version]
|
||||
if !ok {
|
||||
return fmt.Errorf("config mismatch, beacon node configured to connect to %s, detected state is for %s", params.BeaconConfig().ConfigName, cf.Config.ConfigName)
|
||||
}
|
||||
|
||||
blockRoot, err := wblk.Block().HashTreeRoot()
|
||||
log.Infof("detected supported config for state & block version, config name=%s, fork name=%s", cf.Config.ConfigName, version.String(cf.Fork))
|
||||
state, err := cf.UnmarshalBeaconState(serState)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to initialize origin state w/ bytes + config+fork")
|
||||
}
|
||||
|
||||
wblk, err := cf.UnmarshalBeaconBlock(serBlock)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to initialize origin block w/ bytes + config+fork")
|
||||
}
|
||||
blk := wblk.Block()
|
||||
|
||||
// save block
|
||||
blockRoot, err := blk.HashTreeRoot()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "could not compute HashTreeRoot of checkpoint block")
|
||||
}
|
||||
// save block
|
||||
log.Infof("saving checkpoint block to db, w/ root=%#x", blockRoot)
|
||||
if err := s.SaveBlock(ctx, wblk); err != nil {
|
||||
return errors.Wrap(err, "could not save checkpoint block")
|
||||
}
|
||||
|
||||
// save state
|
||||
if err = s.SaveState(ctx, bs, blockRoot); err != nil {
|
||||
log.Infof("calling SaveState w/ blockRoot=%x", blockRoot)
|
||||
if err = s.SaveState(ctx, state, blockRoot); err != nil {
|
||||
return errors.Wrap(err, "could not save state")
|
||||
}
|
||||
if err = s.SaveStateSummary(ctx, ðpb.StateSummary{
|
||||
Slot: bs.Slot(),
|
||||
Slot: state.Slot(),
|
||||
Root: blockRoot[:],
|
||||
}); err != nil {
|
||||
return errors.Wrap(err, "could not save state summary")
|
||||
}
|
||||
|
||||
// save origin block root in special key, to be used when the canonical
|
||||
// origin (start of chain, ie alternative to genesis) block or state is needed
|
||||
if err = s.SaveOriginBlockRoot(ctx, blockRoot); err != nil {
|
||||
return errors.Wrap(err, "could not save origin block root")
|
||||
}
|
||||
|
||||
// mark block as head of chain, so that processing will pick up from this point
|
||||
if err = s.SaveHeadBlockRoot(ctx, blockRoot); err != nil {
|
||||
return errors.Wrap(err, "could not save head block root")
|
||||
}
|
||||
|
||||
// save origin block root in a special key, to be used when the canonical
|
||||
// origin (start of chain, ie alternative to genesis) block or state is needed
|
||||
if err = s.SaveOriginCheckpointBlockRoot(ctx, blockRoot); err != nil {
|
||||
return errors.Wrap(err, "could not save origin block root")
|
||||
}
|
||||
|
||||
// rebuild the checkpoint from the block
|
||||
// use it to mark the block as justified and finalized
|
||||
slotEpoch, err := wblk.Block().Slot().SafeDivSlot(params.BeaconConfig().SlotsPerEpoch)
|
||||
|
||||
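SaveOrigin gets the same treatment: it now receives pre-read ssz bytes for the origin state and block rather than io.Readers. A sketch of how a checkpoint-sync startup path might call it once genesis has been loaded (the file names and the db/ctx variables are hypothetical):

serState, err := os.ReadFile("state.ssz") // hypothetical origin state source
if err != nil {
	return err
}
serBlock, err := os.ReadFile("block.ssz") // hypothetical origin block source
if err != nil {
	return err
}
// SaveOrigin verifies genesis is present, seeds the backfill root, then saves
// the checkpoint block, state, state summary, head and origin checkpoint roots.
if err := db.SaveOrigin(ctx, serState, serBlock); err != nil {
	return err
}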
49
beacon-chain/db/kv/wss_test.go
Normal file
@@ -0,0 +1,49 @@
|
||||
package kv
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/state/genesis"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1/wrapper"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
"github.com/prysmaticlabs/prysm/testing/util"
|
||||
)
|
||||
|
||||
func TestSaveOrigin(t *testing.T) {
|
||||
// Embedded Genesis works with Mainnet config
|
||||
params.SetupTestConfigCleanup(t)
|
||||
cfg := params.BeaconConfig()
|
||||
cfg.ConfigName = params.ConfigNames[params.Mainnet]
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
ctx := context.Background()
|
||||
db := setupDB(t)
|
||||
|
||||
st, err := genesis.State(params.Mainnet.String())
|
||||
require.NoError(t, err)
|
||||
|
||||
sb, err := st.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.LoadGenesis(ctx, sb))
|
||||
|
||||
// this is necessary for mainnet, because LoadGenesis is short-circuited by the embedded state,
|
||||
// so the genesis root key is never written to the db.
|
||||
require.NoError(t, db.EnsureEmbeddedGenesis(ctx))
|
||||
|
||||
cst, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
csb, err := cst.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
cb := util.NewBeaconBlock()
|
||||
scb, err := wrapper.WrappedSignedBeaconBlock(cb)
|
||||
require.NoError(t, err)
|
||||
cbb, err := scb.MarshalSSZ()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveOrigin(ctx, csb, cbb))
|
||||
|
||||
broot, err := scb.Block().HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, db.IsFinalizedBlock(ctx, broot))
|
||||
}
|
||||
@@ -12,6 +12,7 @@ go_library(
|
||||
"//testing/spectest:__subpackages__",
|
||||
],
|
||||
deps = [
|
||||
"//beacon-chain/forkchoice/types:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
|
||||
|
||||
@@ -19,14 +19,16 @@ go_library(
|
||||
"//testing/spectest:__subpackages__",
|
||||
],
|
||||
deps = [
|
||||
"//beacon-chain/forkchoice/types:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -45,6 +47,7 @@ go_test(
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/forkchoice/types:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
|
||||
@@ -3,8 +3,8 @@ package doublylinkedtree
|
||||
import "errors"
|
||||
|
||||
var ErrNilNode = errors.New("invalid nil or unknown node")
|
||||
var errInvalidBalance = errors.New("invalid node balance")
|
||||
var errInvalidProposerBoostRoot = errors.New("invalid proposer boost root")
|
||||
var errUnknownFinalizedRoot = errors.New("unknown finalized root")
|
||||
var errUnknownJustifiedRoot = errors.New("unknown justified root")
|
||||
var errInvalidOptimisticStatus = errors.New("invalid optimistic status")
|
||||
var errUnknownPayloadHash = errors.New("unknown payload hash")
|
||||
|
||||
@@ -2,12 +2,15 @@ package doublylinkedtree
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
|
||||
pbrpc "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
"github.com/sirupsen/logrus"
|
||||
"go.opencensus.io/trace"
|
||||
)
|
||||
|
||||
@@ -18,6 +21,7 @@ func New(justifiedEpoch, finalizedEpoch types.Epoch) *ForkChoice {
|
||||
finalizedEpoch: finalizedEpoch,
|
||||
proposerBoostRoot: [32]byte{},
|
||||
nodeByRoot: make(map[[fieldparams.RootLength]byte]*Node),
|
||||
nodeByPayload: make(map[[fieldparams.RootLength]byte]*Node),
|
||||
pruneThreshold: defaultPruneThreshold,
|
||||
}
|
||||
|
||||
@@ -168,7 +172,7 @@ func (f *ForkChoice) IsCanonical(root [32]byte) bool {
|
||||
}
|
||||
|
||||
// IsOptimistic returns true if the given root has been optimistically synced.
|
||||
func (f *ForkChoice) IsOptimistic(_ context.Context, root [32]byte) (bool, error) {
|
||||
func (f *ForkChoice) IsOptimistic(root [32]byte) (bool, error) {
|
||||
f.store.nodesLock.RLock()
|
||||
defer f.store.nodesLock.RUnlock()
|
||||
|
||||
@@ -249,9 +253,21 @@ func (f *ForkChoice) updateBalances(newBalances []uint64) error {
|
||||
return ErrNilNode
|
||||
}
|
||||
if currentNode.balance < oldBalance {
|
||||
return errInvalidBalance
|
||||
f.store.proposerBoostLock.RLock()
|
||||
log.WithFields(logrus.Fields{
|
||||
"nodeRoot": fmt.Sprintf("%#x", bytesutil.Trunc(vote.currentRoot[:])),
|
||||
"oldBalance": oldBalance,
|
||||
"nodeBalance": currentNode.balance,
|
||||
"nodeWeight": currentNode.weight,
|
||||
"proposerBoostRoot": fmt.Sprintf("%#x", bytesutil.Trunc(f.store.proposerBoostRoot[:])),
|
||||
"previousProposerBoostRoot": fmt.Sprintf("%#x", bytesutil.Trunc(f.store.previousProposerBoostRoot[:])),
|
||||
"previousProposerBoostScore": f.store.previousProposerBoostScore,
|
||||
}).Warning("node with invalid balance, setting it to zero")
|
||||
f.store.proposerBoostLock.RUnlock()
|
||||
currentNode.balance = 0
|
||||
} else {
|
||||
currentNode.balance -= oldBalance
|
||||
}
|
||||
currentNode.balance -= oldBalance
|
||||
}
|
||||
}
|
||||
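The branch added above guards against a uint64 underflow: when a node's stored balance is smaller than the old vote balance being removed (for example 100 - 125, which on a uint64 would wrap to 2^64 - 25), the balance is logged with its fork choice context and clamped to zero instead of returning errInvalidBalance as the removed code did. The TestForkChoice_UpdateBalancesUnderflow case below exercises exactly this path.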
|
||||
@@ -302,6 +318,6 @@ func (f *ForkChoice) ForkChoiceNodes() []*pbrpc.ForkChoiceNode {
|
||||
}
|
||||
|
||||
// SetOptimisticToInvalid removes a block with an invalid execution payload from the fork choice store, along with every node invalidated with it
|
||||
func (f *ForkChoice) SetOptimisticToInvalid(ctx context.Context, root [fieldparams.RootLength]byte) ([][32]byte, error) {
|
||||
return f.store.removeNode(ctx, root)
|
||||
func (f *ForkChoice) SetOptimisticToInvalid(ctx context.Context, root, payloadHash [fieldparams.RootLength]byte) ([][32]byte, error) {
|
||||
return f.store.setOptimisticToInvalid(ctx, root, payloadHash)
|
||||
}
|
||||
|
||||
@@ -58,6 +58,30 @@ func TestForkChoice_UpdateBalancesNegativeChange(t *testing.T) {
|
||||
assert.Equal(t, uint64(30), s.nodeByRoot[indexToHash(3)].balance)
|
||||
}
|
||||
|
||||
func TestForkChoice_UpdateBalancesUnderflow(t *testing.T) {
|
||||
f := setup(0, 0)
|
||||
ctx := context.Background()
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 1, indexToHash(1), params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 2, indexToHash(2), indexToHash(1), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 3, indexToHash(3), indexToHash(2), params.BeaconConfig().ZeroHash, 0, 0))
|
||||
s := f.store
|
||||
s.nodeByRoot[indexToHash(1)].balance = 100
|
||||
s.nodeByRoot[indexToHash(2)].balance = 100
|
||||
s.nodeByRoot[indexToHash(3)].balance = 100
|
||||
|
||||
f.balances = []uint64{125, 125, 125}
|
||||
f.votes = []Vote{
|
||||
{indexToHash(1), indexToHash(1), 0},
|
||||
{indexToHash(2), indexToHash(2), 0},
|
||||
{indexToHash(3), indexToHash(3), 0},
|
||||
}
|
||||
|
||||
require.NoError(t, f.updateBalances([]uint64{10, 20, 30}))
|
||||
assert.Equal(t, uint64(0), s.nodeByRoot[indexToHash(1)].balance)
|
||||
assert.Equal(t, uint64(0), s.nodeByRoot[indexToHash(2)].balance)
|
||||
assert.Equal(t, uint64(5), s.nodeByRoot[indexToHash(3)].balance)
|
||||
}
|
||||
|
||||
func TestForkChoice_IsCanonical(t *testing.T) {
|
||||
f := setup(1, 1)
|
||||
ctx := context.Background()
|
||||
|
||||
@@ -3,9 +3,12 @@ package doublylinkedtree
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var (
|
||||
log = logrus.WithField("prefix", "forkchoice-doublylinkedtree")
|
||||
|
||||
headSlotNumber = promauto.NewGauge(
|
||||
prometheus.GaugeOpts{
|
||||
Name: "doublylinkedtree_head_slot",
|
||||
|
||||
@@ -109,7 +109,7 @@ func (n *Node) leadsToViableHead(justifiedEpoch, finalizedEpoch types.Epoch) boo
|
||||
return n.bestDescendant.viableForHead(justifiedEpoch, finalizedEpoch)
|
||||
}
|
||||
|
||||
// setNodeAndParentValidated sets the current node and the parent as validated (i.e. non-optimistic).
|
||||
// setNodeAndParentValidated sets the current node and all the ancestors as validated (i.e. non-optimistic).
|
||||
func (n *Node) setNodeAndParentValidated(ctx context.Context) error {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
|
||||
@@ -180,27 +180,27 @@ func TestNode_SetFullyValidated(t *testing.T) {
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 4, indexToHash(4), indexToHash(3), params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 5, indexToHash(5), indexToHash(1), params.BeaconConfig().ZeroHash, 1, 1))
|
||||
|
||||
opt, err := f.IsOptimistic(ctx, indexToHash(5))
|
||||
opt, err := f.IsOptimistic(indexToHash(5))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, opt)
|
||||
|
||||
opt, err = f.IsOptimistic(ctx, indexToHash(4))
|
||||
opt, err = f.IsOptimistic(indexToHash(4))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, opt)
|
||||
|
||||
require.NoError(t, f.store.nodeByRoot[indexToHash(4)].setNodeAndParentValidated(ctx))
|
||||
|
||||
// block 5 should still be optimistic
|
||||
opt, err = f.IsOptimistic(ctx, indexToHash(5))
|
||||
opt, err = f.IsOptimistic(indexToHash(5))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, true, opt)
|
||||
|
||||
// blocks 4 and 3 should now be valid
|
||||
opt, err = f.IsOptimistic(ctx, indexToHash(4))
|
||||
opt, err = f.IsOptimistic(indexToHash(4))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, opt)
|
||||
|
||||
opt, err = f.IsOptimistic(ctx, indexToHash(3))
|
||||
opt, err = f.IsOptimistic(indexToHash(3))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, opt)
|
||||
}
|
||||
|
||||
@@ -2,22 +2,54 @@ package doublylinkedtree
|
||||
|
||||
import (
|
||||
"context"
|
||||
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
)
|
||||
|
||||
func (s *Store) setOptimisticToInvalid(ctx context.Context, root, payloadHash [32]byte) ([][32]byte, error) {
|
||||
s.nodesLock.Lock()
|
||||
invalidRoots := make([][32]byte, 0)
|
||||
node, ok := s.nodeByRoot[root]
|
||||
if !ok || node == nil {
|
||||
s.nodesLock.Unlock()
|
||||
return invalidRoots, ErrNilNode
|
||||
}
|
||||
// Check if last valid hash is an ancestor of the passed node.
|
||||
lastValid, ok := s.nodeByPayload[payloadHash]
|
||||
if !ok || lastValid == nil {
|
||||
s.nodesLock.Unlock()
|
||||
return invalidRoots, errUnknownPayloadHash
|
||||
}
|
||||
firstInvalid := node
|
||||
for ; firstInvalid.parent != nil && firstInvalid.parent.payloadHash != payloadHash; firstInvalid = firstInvalid.parent {
|
||||
if ctx.Err() != nil {
|
||||
s.nodesLock.Unlock()
|
||||
return invalidRoots, ctx.Err()
|
||||
}
|
||||
}
|
||||
// If the last valid payload is in a different fork, we remove only the
|
||||
// passed node.
|
||||
if firstInvalid.parent == nil {
|
||||
firstInvalid = node
|
||||
}
|
||||
s.nodesLock.Unlock()
|
||||
return s.removeNode(ctx, firstInvalid)
|
||||
}
|
||||
|
||||
// removeNode removes the given node and all of its children
|
||||
// from the Fork Choice Store.
|
||||
func (s *Store) removeNode(ctx context.Context, root [32]byte) ([][32]byte, error) {
|
||||
func (s *Store) removeNode(ctx context.Context, node *Node) ([][32]byte, error) {
|
||||
s.nodesLock.Lock()
|
||||
defer s.nodesLock.Unlock()
|
||||
invalidRoots := make([][32]byte, 0)
|
||||
|
||||
node, ok := s.nodeByRoot[root]
|
||||
if !ok || node == nil {
|
||||
if node == nil {
|
||||
return invalidRoots, ErrNilNode
|
||||
}
|
||||
if !node.optimistic || node.parent == nil {
|
||||
return invalidRoots, errInvalidOptimisticStatus
|
||||
}
|
||||
|
||||
children := node.parent.children
|
||||
if len(children) == 1 {
|
||||
node.parent.children = []*Node{}
|
||||
@@ -47,6 +79,16 @@ func (s *Store) removeNodeAndChildren(ctx context.Context, node *Node, invalidRo
|
||||
}
|
||||
}
|
||||
invalidRoots = append(invalidRoots, node.root)
|
||||
s.proposerBoostLock.Lock()
|
||||
if node.root == s.proposerBoostRoot {
|
||||
s.proposerBoostRoot = [32]byte{}
|
||||
}
|
||||
if node.root == s.previousProposerBoostRoot {
|
||||
s.previousProposerBoostRoot = params.BeaconConfig().ZeroHash
|
||||
s.previousProposerBoostScore = 0
|
||||
}
|
||||
s.proposerBoostLock.Unlock()
|
||||
delete(s.nodeByRoot, node.root)
|
||||
delete(s.nodeByPayload, node.payloadHash)
|
||||
return invalidRoots, nil
|
||||
}
|
||||
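This is the store-level half of the reworked invalidation flow: given the root of a block whose payload the execution engine rejected and the last valid payload hash it reported, the store walks back to the first invalid ancestor and prunes that whole subtree. A sketch of how a caller might use the exported wrapper shown earlier, assuming it already has an INVALID verdict from the engine (the variable names are hypothetical):

// invalidBlockRoot: root of the beacon block whose execution payload was INVALID.
// lastValidHash:    latest valid payload hash reported by the execution engine.
invalidRoots, err := forkChoicer.SetOptimisticToInvalid(ctx, invalidBlockRoot, lastValidHash)
if err != nil {
	return err
}
// invalidRoots lists every pruned root; evict them from caller-side caches as needed.
_ = invalidRoots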
|
||||
@@ -24,32 +24,71 @@ import (
|
||||
func TestPruneInvalid(t *testing.T) {
|
||||
tests := []struct {
|
||||
root [32]byte // the root of the new INVALID block
|
||||
payload [32]byte // the last valid hash
|
||||
wantedNodeNumber int
|
||||
wantedRoots [][32]byte
|
||||
}{
|
||||
{
|
||||
[32]byte{'j'},
|
||||
[32]byte{'B'},
|
||||
12,
|
||||
[][32]byte{[32]byte{'j'}},
|
||||
},
|
||||
{
|
||||
[32]byte{'c'},
|
||||
[32]byte{'B'},
|
||||
4,
|
||||
[][32]byte{[32]byte{'f'}, [32]byte{'e'}, [32]byte{'i'}, [32]byte{'h'}, [32]byte{'l'},
|
||||
[32]byte{'k'}, [32]byte{'g'}, [32]byte{'d'}, [32]byte{'c'}},
|
||||
},
|
||||
{
|
||||
[32]byte{'i'},
|
||||
[32]byte{'H'},
|
||||
12,
|
||||
[][32]byte{[32]byte{'i'}},
|
||||
},
|
||||
{
|
||||
[32]byte{'h'},
|
||||
[32]byte{'G'},
|
||||
11,
|
||||
[][32]byte{[32]byte{'i'}, [32]byte{'h'}},
|
||||
},
|
||||
{
|
||||
[32]byte{'g'},
|
||||
[32]byte{'D'},
|
||||
8,
|
||||
[][32]byte{[32]byte{'i'}, [32]byte{'h'}, [32]byte{'l'}, [32]byte{'k'}, [32]byte{'g'}},
|
||||
},
|
||||
{
|
||||
[32]byte{'i'},
|
||||
[32]byte{'D'},
|
||||
8,
|
||||
[][32]byte{[32]byte{'i'}, [32]byte{'h'}, [32]byte{'l'}, [32]byte{'k'}, [32]byte{'g'}},
|
||||
},
|
||||
{
|
||||
[32]byte{'f'},
|
||||
[32]byte{'D'},
|
||||
11,
|
||||
[][32]byte{[32]byte{'f'}, [32]byte{'e'}},
|
||||
},
|
||||
{
|
||||
[32]byte{'h'},
|
||||
[32]byte{'C'},
|
||||
5,
|
||||
[][32]byte{
|
||||
[32]byte{'f'},
|
||||
[32]byte{'e'},
|
||||
[32]byte{'i'},
|
||||
[32]byte{'h'},
|
||||
[32]byte{'l'},
|
||||
[32]byte{'k'},
|
||||
[32]byte{'g'},
|
||||
[32]byte{'d'},
|
||||
},
|
||||
},
|
||||
{
|
||||
[32]byte{'g'},
|
||||
[32]byte{'E'},
|
||||
8,
|
||||
[][32]byte{[32]byte{'i'}, [32]byte{'h'}, [32]byte{'l'}, [32]byte{'k'}, [32]byte{'g'}},
|
||||
},
|
||||
@@ -58,22 +97,45 @@ func TestPruneInvalid(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
f := setup(1, 1)
|
||||
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, [32]byte{'J'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, [32]byte{'E'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, [32]byte{'G'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, [32]byte{'F'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, [32]byte{'H'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, [32]byte{'K'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, [32]byte{'I'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, [32]byte{'L'}, 1, 1))
|
||||
|
||||
roots, err := f.store.removeNode(context.Background(), tc.root)
|
||||
roots, err := f.store.setOptimisticToInvalid(context.Background(), tc.root, tc.payload)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, tc.wantedRoots, roots)
|
||||
require.Equal(t, tc.wantedNodeNumber, f.NodeCount())
|
||||
}
|
||||
}
|
||||
|
||||
// This is a regression test (10445)
|
||||
func TestSetOptimisticToInvalid_ProposerBoost(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
f := setup(1, 1)
|
||||
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1))
|
||||
f.store.proposerBoostLock.Lock()
|
||||
f.store.proposerBoostRoot = [32]byte{'c'}
|
||||
f.store.previousProposerBoostScore = 10
|
||||
f.store.previousProposerBoostRoot = [32]byte{'b'}
|
||||
f.store.proposerBoostLock.Unlock()
|
||||
|
||||
_, err := f.SetOptimisticToInvalid(ctx, [32]byte{'c'}, [32]byte{'A'})
|
||||
require.NoError(t, err)
|
||||
f.store.proposerBoostLock.RLock()
|
||||
require.Equal(t, uint64(0), f.store.previousProposerBoostScore)
|
||||
require.DeepEqual(t, [32]byte{}, f.store.proposerBoostRoot)
|
||||
require.DeepEqual(t, params.BeaconConfig().ZeroHash, f.store.previousProposerBoostRoot)
|
||||
f.store.proposerBoostLock.RUnlock()
|
||||
}
|
||||
|
||||
@@ -2,12 +2,10 @@ package doublylinkedtree
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
)
|
||||
|
||||
// BoostProposerRoot sets the block root which should be boosted during
|
||||
@@ -19,17 +17,18 @@ import (
|
||||
// is_before_attesting_interval = time_into_slot < SECONDS_PER_SLOT // INTERVALS_PER_SLOT
|
||||
// if get_current_slot(store) == block.slot and is_before_attesting_interval:
|
||||
// store.proposer_boost_root = hash_tree_root(block)
|
||||
func (f *ForkChoice) BoostProposerRoot(_ context.Context, blockSlot types.Slot, blockRoot [32]byte, genesisTime time.Time) error {
|
||||
func (f *ForkChoice) BoostProposerRoot(_ context.Context, args *forkchoicetypes.ProposerBoostRootArgs) error {
|
||||
if args == nil {
|
||||
return errors.New("nil function args provided to BoostProposerRoot")
|
||||
}
|
||||
secondsPerSlot := params.BeaconConfig().SecondsPerSlot
|
||||
timeIntoSlot := uint64(time.Since(genesisTime).Seconds()) % secondsPerSlot
|
||||
isBeforeAttestingInterval := timeIntoSlot < secondsPerSlot/params.BeaconConfig().IntervalsPerSlot
|
||||
currentSlot := slots.SinceGenesis(genesisTime)
|
||||
isBeforeAttestingInterval := args.SecondsIntoSlot < secondsPerSlot/params.BeaconConfig().IntervalsPerSlot
|
||||
|
||||
// Only update the boosted proposer root to the incoming block root
|
||||
// if the block is for the current, clock-based slot and the block was timely.
|
||||
if currentSlot == blockSlot && isBeforeAttestingInterval {
|
||||
if args.CurrentSlot == args.BlockSlot && isBeforeAttestingInterval {
|
||||
f.store.proposerBoostLock.Lock()
|
||||
f.store.proposerBoostRoot = blockRoot
|
||||
f.store.proposerBoostRoot = args.BlockRoot
|
||||
f.store.proposerBoostLock.Unlock()
|
||||
}
|
||||
return nil
|
||||
|
||||
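After this refactor BoostProposerRoot no longer derives timing from a genesis timestamp itself; the caller computes the clock slot and the seconds elapsed in it and passes them in. A sketch of how the args might be assembled by a caller that knows the genesis time (blockRoot, block, genesisTime and forkChoicer are hypothetical caller variables; the field names match the tests below):

secondsPerSlot := params.BeaconConfig().SecondsPerSlot
secondsIntoSlot := uint64(time.Since(genesisTime).Seconds()) % secondsPerSlot
args := &forkchoicetypes.ProposerBoostRootArgs{
	BlockRoot:       blockRoot,
	BlockSlot:       block.Slot(),
	CurrentSlot:     slots.SinceGenesis(genesisTime),
	SecondsIntoSlot: secondsIntoSlot,
}
if err := forkChoicer.BoostProposerRoot(ctx, args); err != nil {
	return err
}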
@@ -2,11 +2,11 @@ package doublylinkedtree
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
@@ -26,6 +26,11 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
balances[i] = 10
|
||||
}
|
||||
jEpoch, fEpoch := types.Epoch(0), types.Epoch(0)
|
||||
t.Run("nil args check", func(t *testing.T) {
|
||||
f := setup(jEpoch, fEpoch)
|
||||
err := f.BoostProposerRoot(ctx, nil)
|
||||
require.ErrorContains(t, "nil function args", err)
|
||||
})
|
||||
t.Run("back-propagates boost score to ancestors after proposer boosting", func(t *testing.T) {
|
||||
f := setup(jEpoch, fEpoch)
|
||||
|
||||
@@ -128,9 +133,14 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
),
|
||||
)
|
||||
f.ProcessAttestation(ctx, []uint64{3}, newRoot, fEpoch)
|
||||
threeSlots := 3 * params.BeaconConfig().SecondsPerSlot
|
||||
genesisTime := time.Now().Add(-time.Second * time.Duration(threeSlots))
|
||||
require.NoError(t, f.BoostProposerRoot(ctx, slot, newRoot, genesisTime))
|
||||
clockSlot := types.Slot(3)
|
||||
args := &forkchoicetypes.ProposerBoostRootArgs{
|
||||
BlockRoot: newRoot,
|
||||
BlockSlot: slot,
|
||||
CurrentSlot: clockSlot,
|
||||
SecondsIntoSlot: 0,
|
||||
}
|
||||
require.NoError(t, f.BoostProposerRoot(ctx, args))
|
||||
headRoot, err = f.Head(ctx, jEpoch, zeroHash, balances, fEpoch)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, newRoot, headRoot, "Incorrect head for justified epoch at slot 3")
|
||||
@@ -221,9 +231,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
assert.Equal(t, honestBlock, r, "Incorrect head for justified epoch at slot 2")
|
||||
|
||||
// We boost the honest proposal at slot 2.
|
||||
secondsPerSlot := time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot)
|
||||
genesis := time.Now().Add(-2 * secondsPerSlot)
|
||||
require.NoError(t, f.BoostProposerRoot(ctx, honestBlockSlot, honestBlock, genesis))
|
||||
args := &forkchoicetypes.ProposerBoostRootArgs{
|
||||
BlockRoot: honestBlock,
|
||||
BlockSlot: honestBlockSlot,
|
||||
CurrentSlot: types.Slot(2),
|
||||
SecondsIntoSlot: 0,
|
||||
}
|
||||
require.NoError(t, f.BoostProposerRoot(ctx, args))
|
||||
|
||||
// The maliciously withheld block has one vote.
|
||||
votes := []uint64{1}
|
||||
@@ -289,9 +303,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
assert.Equal(t, honestBlock, r, "Incorrect head for justified epoch at slot 2")
|
||||
|
||||
// We boost the honest proposal at slot 2.
|
||||
secondsPerSlot := time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot)
|
||||
genesis := time.Now().Add(-2 * secondsPerSlot)
|
||||
require.NoError(t, f.BoostProposerRoot(ctx, honestBlockSlot, honestBlock, genesis))
|
||||
args := &forkchoicetypes.ProposerBoostRootArgs{
|
||||
BlockRoot: honestBlock,
|
||||
BlockSlot: honestBlockSlot,
|
||||
CurrentSlot: types.Slot(2),
|
||||
SecondsIntoSlot: 0,
|
||||
}
|
||||
require.NoError(t, f.BoostProposerRoot(ctx, args))
|
||||
|
||||
// An attestation is received for B that has more voting power than C with the proposer boost,
|
||||
// allowing B to then become the head if their attestation has enough adversarial votes.
|
||||
@@ -345,9 +363,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
assert.Equal(t, c, r, "Incorrect head for justified epoch at slot 2")
|
||||
|
||||
// We boost C.
|
||||
secondsPerSlot := time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot)
|
||||
genesis := time.Now().Add(-2 * secondsPerSlot)
|
||||
require.NoError(t, f.BoostProposerRoot(ctx, cSlot /* slot */, c, genesis))
|
||||
args := &forkchoicetypes.ProposerBoostRootArgs{
|
||||
BlockRoot: c,
|
||||
BlockSlot: cSlot,
|
||||
CurrentSlot: types.Slot(2),
|
||||
SecondsIntoSlot: 0,
|
||||
}
|
||||
require.NoError(t, f.BoostProposerRoot(ctx, args))
|
||||
|
||||
bSlot := types.Slot(1)
|
||||
b := indexToHash(1)
|
||||
@@ -393,8 +415,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
assert.Equal(t, c, r, "Expected C to remain the head")
|
||||
|
||||
// Block D receives the boost.
|
||||
genesis = time.Now().Add(-3 * secondsPerSlot)
|
||||
require.NoError(t, f.BoostProposerRoot(ctx, dSlot /* slot */, d, genesis))
|
||||
args = &forkchoicetypes.ProposerBoostRootArgs{
|
||||
BlockRoot: d,
|
||||
BlockSlot: dSlot,
|
||||
CurrentSlot: types.Slot(3),
|
||||
SecondsIntoSlot: 0,
|
||||
}
|
||||
require.NoError(t, f.BoostProposerRoot(ctx, args))
|
||||
|
||||
// Ensure D becomes the head thanks to boosting.
|
||||
r, err = f.Head(ctx, jEpoch, zeroHash, balances, fEpoch)
|
||||
@@ -415,12 +442,15 @@ func TestForkChoice_BoostProposerRoot(t *testing.T) {
|
||||
f := &ForkChoice{
|
||||
store: &Store{},
|
||||
}
|
||||
// Genesis set to 1 slot ago.
|
||||
genesis := time.Now().Add(-time.Duration(cfg.SecondsPerSlot) * time.Second)
|
||||
blockRoot := [32]byte{'A'}
|
||||
|
||||
// Trying to boost a block from slot 0 should not work.
|
||||
err := f.BoostProposerRoot(ctx, types.Slot(0), blockRoot, genesis)
|
||||
args := &forkchoicetypes.ProposerBoostRootArgs{
|
||||
BlockRoot: blockRoot,
|
||||
BlockSlot: types.Slot(0),
|
||||
CurrentSlot: types.Slot(1),
|
||||
SecondsIntoSlot: 0,
|
||||
}
|
||||
err := f.BoostProposerRoot(ctx, args)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, [32]byte{}, f.store.proposerBoostRoot)
|
||||
})
|
||||
@@ -429,14 +459,18 @@ func TestForkChoice_BoostProposerRoot(t *testing.T) {
|
||||
store: &Store{},
|
||||
}
|
||||
// Genesis set to 1 slot ago + X where X > attesting interval.
|
||||
genesis := time.Now().Add(-time.Duration(cfg.SecondsPerSlot) * time.Second)
|
||||
attestingInterval := time.Duration(cfg.SecondsPerSlot / cfg.IntervalsPerSlot)
|
||||
greaterThanAttestingInterval := attestingInterval + 100*time.Millisecond
|
||||
genesis = genesis.Add(-greaterThanAttestingInterval * time.Second)
|
||||
blockRoot := [32]byte{'A'}
|
||||
attestingInterval := time.Duration(cfg.SecondsPerSlot/cfg.IntervalsPerSlot) * time.Second
|
||||
greaterThanAttestingInterval := attestingInterval + time.Second
|
||||
|
||||
// Trying to boost a block from slot 1 that is untimely should not work.
|
||||
err := f.BoostProposerRoot(ctx, types.Slot(1), blockRoot, genesis)
|
||||
blockRoot := [32]byte{'A'}
|
||||
args := &forkchoicetypes.ProposerBoostRootArgs{
|
||||
BlockRoot: blockRoot,
|
||||
BlockSlot: types.Slot(1),
|
||||
CurrentSlot: 1,
|
||||
SecondsIntoSlot: uint64(greaterThanAttestingInterval.Seconds()),
|
||||
}
|
||||
err := f.BoostProposerRoot(ctx, args)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, [32]byte{}, f.store.proposerBoostRoot)
|
||||
})
|
||||
@@ -445,11 +479,15 @@ func TestForkChoice_BoostProposerRoot(t *testing.T) {
|
||||
store: &Store{},
|
||||
}
|
||||
// Genesis set to 1 slot ago + 0 seconds into the attesting interval.
|
||||
genesis := time.Now().Add(-time.Duration(cfg.SecondsPerSlot) * time.Second)
|
||||
fmt.Println(genesis)
|
||||
blockRoot := [32]byte{'A'}
|
||||
args := &forkchoicetypes.ProposerBoostRootArgs{
|
||||
BlockRoot: blockRoot,
|
||||
BlockSlot: types.Slot(1),
|
||||
CurrentSlot: types.Slot(1),
|
||||
SecondsIntoSlot: 0,
|
||||
}
|
||||
|
||||
err := f.BoostProposerRoot(ctx, types.Slot(1), blockRoot, genesis)
|
||||
err := f.BoostProposerRoot(ctx, args)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, [32]byte{'A'}, f.store.proposerBoostRoot)
|
||||
})
|
||||
@@ -457,13 +495,16 @@ func TestForkChoice_BoostProposerRoot(t *testing.T) {
|
||||
f := &ForkChoice{
|
||||
store: &Store{},
|
||||
}
|
||||
// Genesis set to 1 slot ago + (attesting interval / 2).
|
||||
genesis := time.Now().Add(-time.Duration(cfg.SecondsPerSlot) * time.Second)
|
||||
blockRoot := [32]byte{'A'}
|
||||
halfAttestingInterval := time.Second
|
||||
genesis = genesis.Add(-halfAttestingInterval)
|
||||
args := &forkchoicetypes.ProposerBoostRootArgs{
|
||||
BlockRoot: blockRoot,
|
||||
BlockSlot: types.Slot(1),
|
||||
CurrentSlot: types.Slot(1),
|
||||
SecondsIntoSlot: uint64(halfAttestingInterval.Seconds()),
|
||||
}
|
||||
|
||||
err := f.BoostProposerRoot(ctx, types.Slot(1), blockRoot, genesis)
|
||||
err := f.BoostProposerRoot(ctx, args)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, [32]byte{'A'}, f.store.proposerBoostRoot)
|
||||
})
|
||||
|
||||
@@ -121,6 +121,7 @@ func (s *Store) insert(ctx context.Context,
|
||||
payloadHash: payloadHash,
|
||||
}
|
||||
|
||||
s.nodeByPayload[payloadHash] = n
|
||||
s.nodeByRoot[root] = n
|
||||
if parent != nil {
|
||||
parent.children = append(parent.children, n)
|
||||
|
||||
@@ -107,7 +107,8 @@ func TestStore_Insert(t *testing.T) {
|
||||
// The new node does not have a parent.
|
||||
treeRootNode := &Node{slot: 0, root: indexToHash(0)}
|
||||
nodeByRoot := map[[32]byte]*Node{indexToHash(0): treeRootNode}
|
||||
s := &Store{nodeByRoot: nodeByRoot, treeRootNode: treeRootNode}
|
||||
nodeByPayload := map[[32]byte]*Node{indexToHash(0): treeRootNode}
|
||||
s := &Store{nodeByRoot: nodeByRoot, treeRootNode: treeRootNode, nodeByPayload: nodeByPayload}
|
||||
payloadHash := [32]byte{'a'}
|
||||
require.NoError(t, s.insert(context.Background(), 100, indexToHash(100), indexToHash(0), payloadHash, 1, 1))
|
||||
assert.Equal(t, 2, len(s.nodeByRoot), "Did not insert block")
|
||||
|
||||
@@ -26,6 +26,7 @@ type Store struct {
|
||||
treeRootNode *Node // the root node of the store tree.
|
||||
headNode *Node // last head Node
|
||||
nodeByRoot map[[fieldparams.RootLength]byte]*Node // nodes indexed by roots.
|
||||
nodeByPayload map[[fieldparams.RootLength]byte]*Node // nodes indexed by payload Hash
|
||||
nodesLock sync.RWMutex
|
||||
proposerBoostLock sync.RWMutex
|
||||
}
|
||||
|
||||
@@ -2,9 +2,9 @@ package forkchoice
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
|
||||
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
|
||||
pbrpc "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
|
||||
)
|
||||
@@ -24,7 +24,7 @@ type ForkChoicer interface {
|
||||
type HeadRetriever interface {
|
||||
Head(context.Context, types.Epoch, [32]byte, []uint64, types.Epoch) ([32]byte, error)
|
||||
Tips() ([][32]byte, []types.Slot)
|
||||
IsOptimistic(ctx context.Context, root [32]byte) (bool, error)
|
||||
IsOptimistic(root [32]byte) (bool, error)
|
||||
}
|
||||
|
||||
// BlockProcessor processes the block that's used for accounting fork choice.
|
||||
@@ -51,7 +51,7 @@ type Pruner interface {
|
||||
|
||||
// ProposerBooster is able to boost the proposer's root score during fork choice.
|
||||
type ProposerBooster interface {
|
||||
BoostProposerRoot(ctx context.Context, blockSlot types.Slot, blockRoot [32]byte, genesisTime time.Time) error
|
||||
BoostProposerRoot(ctx context.Context, args *forkchoicetypes.ProposerBoostRootArgs) error
|
||||
ResetBoostedProposerRoot(ctx context.Context) error
|
||||
}
|
||||
|
||||
@@ -71,5 +71,5 @@ type Getter interface {
|
||||
// Setter allows to set forkchoice information
|
||||
type Setter interface {
|
||||
SetOptimisticToValid(context.Context, [fieldparams.RootLength]byte) error
|
||||
SetOptimisticToInvalid(context.Context, [fieldparams.RootLength]byte) ([][32]byte, error)
|
||||
SetOptimisticToInvalid(context.Context, [fieldparams.RootLength]byte, [fieldparams.RootLength]byte) ([][32]byte, error)
|
||||
}
|
||||
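A common way to keep implementations in step with interface changes like these is a compile-time assertion in the implementing package; a hedged sketch, assuming *doublylinkedtree.ForkChoice is intended to satisfy the composite ForkChoicer interface (if it is not, the assertion simply would not compile):

// Compile-time check that the implementation tracks the updated
// IsOptimistic and SetOptimisticToInvalid signatures.
var _ forkchoice.ForkChoicer = (*doublylinkedtree.ForkChoice)(nil)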
|
||||
@@ -19,15 +19,17 @@ go_library(
|
||||
"//testing/spectest:__subpackages__",
|
||||
],
|
||||
deps = [
|
||||
"//beacon-chain/forkchoice/types:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
"//math:go_default_library",
|
||||
"//proto/prysm/v1alpha1:go_default_library",
|
||||
"//time/slots:go_default_library",
|
||||
"@com_github_pkg_errors//:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus:go_default_library",
|
||||
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
|
||||
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@io_opencensus_go//trace:go_default_library",
|
||||
],
|
||||
)
|
||||
@@ -46,6 +48,7 @@ go_test(
|
||||
],
|
||||
embed = [":go_default_library"],
|
||||
deps = [
|
||||
"//beacon-chain/forkchoice/types:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//crypto/hash:go_default_library",
|
||||
"//encoding/bytesutil:go_default_library",
|
||||
|
||||
@@ -5,11 +5,11 @@ import "errors"
|
||||
var errUnknownFinalizedRoot = errors.New("unknown finalized root")
|
||||
var errUnknownJustifiedRoot = errors.New("unknown justified root")
|
||||
var errInvalidNodeIndex = errors.New("node index is invalid")
|
||||
var errInvalidFinalizedNode = errors.New("invalid finalized block on chain")
|
||||
var ErrUnknownNodeRoot = errors.New("unknown block root")
|
||||
var errInvalidJustifiedIndex = errors.New("justified index is invalid")
|
||||
var errInvalidBestChildIndex = errors.New("best child index is invalid")
|
||||
var errInvalidBestDescendantIndex = errors.New("best descendant index is invalid")
|
||||
var errInvalidParentDelta = errors.New("parent delta is invalid")
|
||||
var errInvalidNodeDelta = errors.New("node delta is invalid")
|
||||
var errInvalidDeltaLength = errors.New("delta length is invalid")
|
||||
var errInvalidSyncedTips = errors.New("invalid synced tips")
|
||||
var errInvalidOptimisticStatus = errors.New("invalid optimistic status")
|
||||
|
||||
@@ -85,12 +85,9 @@ func copyNode(node *Node) *Node {
|
||||
return &Node{}
|
||||
}
|
||||
|
||||
copiedRoot := [32]byte{}
|
||||
copy(copiedRoot[:], node.root[:])
|
||||
|
||||
return &Node{
|
||||
slot: node.slot,
|
||||
root: copiedRoot,
|
||||
root: node.root,
|
||||
parent: node.parent,
|
||||
payloadHash: node.payloadHash,
|
||||
justifiedEpoch: node.justifiedEpoch,
|
||||
@@ -98,5 +95,6 @@ func copyNode(node *Node) *Node {
|
||||
weight: node.weight,
|
||||
bestChild: node.bestChild,
|
||||
bestDescendant: node.bestDescendant,
|
||||
status: node.status,
|
||||
}
|
||||
}
|
||||
|
||||
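The dropped copiedRoot buffer in copyNode is safe to remove: root is a [32]byte array, and Go copies arrays by value on assignment, so the new struct literal already receives its own copy. A self-contained illustration of that semantics:

    package main

    import "fmt"

    func main() {
        a := [32]byte{'a'}
        b := a // arrays are values; b is a full copy, not a shared reference
        b[0] = 'x'
        fmt.Println(a[0], b[0]) // 97 120 — mutating b leaves a untouched
    }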
@@ -3,9 +3,12 @@ package protoarray
|
||||
import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var (
|
||||
log = logrus.WithField("prefix", "forkchoice-protoarray")
|
||||
|
||||
headSlotNumber = promauto.NewGauge(
|
||||
prometheus.GaugeOpts{
|
||||
Name: "proto_array_head_slot",
|
||||
@@ -48,16 +51,10 @@ var (
|
||||
Help: "The number of times pruning happened.",
|
||||
},
|
||||
)
|
||||
lastSyncedTipSlot = promauto.NewGauge(
|
||||
prometheus.GaugeOpts{
|
||||
Name: "proto_array_last_synced_tip_slot",
|
||||
Help: "The slot of the last fully validated block added to the proto array.",
|
||||
},
|
||||
)
|
||||
syncedTipsCount = promauto.NewGauge(
|
||||
prometheus.GaugeOpts{
|
||||
Name: "proto_array_synced_tips_count",
|
||||
Help: "The number of elements in the syncedTips structure.",
|
||||
validatedNodesCount = promauto.NewCounter(
|
||||
prometheus.CounterOpts{
|
||||
Name: "proto_array_validated_nodes_count",
|
||||
Help: "The number of nodes that have been fully validated.",
|
||||
},
|
||||
)
|
||||
)
|
||||
|
||||
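With the synced-tips structure removed, its two gauges give way to a counter that only grows as nodes are marked fully validated (it is bumped with validatedNodesCount.Inc() further down in this diff). A small sketch of the same promauto pattern, assuming the default registry is acceptable:

    package example

    import (
        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promauto"
    )

    // promauto registers the metric with the default registry at construction time.
    var validatedNodes = promauto.NewCounter(prometheus.CounterOpts{
        Name: "example_validated_nodes_count",
        Help: "The number of nodes that have been fully validated.",
    })

    func markValidated() {
        validatedNodes.Inc() // counters are monotonic; a gauge would be needed for values that can fall
    }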
@@ -43,8 +43,3 @@ func (n *Node) BestChild() uint64 {
|
||||
func (n *Node) BestDescendant() uint64 {
|
||||
return n.bestDescendant
|
||||
}
|
||||
|
||||
// Graffiti of the fork choice node.
|
||||
func (n *Node) Graffiti() [32]byte {
|
||||
return n.graffiti
|
||||
}
|
||||
|
||||
@@ -16,7 +16,6 @@ func TestNode_Getters(t *testing.T) {
|
||||
weight := uint64(10000)
|
||||
bestChild := uint64(5)
|
||||
bestDescendant := uint64(4)
|
||||
graffiti := [32]byte{'b'}
|
||||
n := &Node{
|
||||
slot: slot,
|
||||
root: root,
|
||||
@@ -26,7 +25,6 @@ func TestNode_Getters(t *testing.T) {
|
||||
weight: weight,
|
||||
bestChild: bestChild,
|
||||
bestDescendant: bestDescendant,
|
||||
graffiti: graffiti,
|
||||
}
|
||||
|
||||
require.Equal(t, slot, n.Slot())
|
||||
@@ -37,5 +35,4 @@ func TestNode_Getters(t *testing.T) {
|
||||
require.Equal(t, weight, n.Weight())
|
||||
require.Equal(t, bestChild, n.BestChild())
|
||||
require.Equal(t, bestDescendant, n.BestDescendant())
|
||||
require.Equal(t, graffiti, n.Graffiti())
|
||||
}
|
||||
|
||||
@@ -3,314 +3,160 @@ package protoarray
|
||||
import (
|
||||
"context"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
)
|
||||
|
||||
// This returns the minimum and maximum slot of the synced_tips tree
|
||||
func (f *ForkChoice) boundarySyncedTips() (types.Slot, types.Slot) {
|
||||
f.syncedTips.RLock()
|
||||
defer f.syncedTips.RUnlock()
|
||||
|
||||
min := params.BeaconConfig().FarFutureSlot
|
||||
max := types.Slot(0)
|
||||
for _, slot := range f.syncedTips.validatedTips {
|
||||
if slot > max {
|
||||
max = slot
|
||||
}
|
||||
if slot < min {
|
||||
min = slot
|
||||
}
|
||||
}
|
||||
return min, max
|
||||
}
|
||||
|
||||
// IsOptimistic returns true if this node is optimistically synced
|
||||
// An optimistically synced block is synced as usual, but its
|
||||
// execution payload is not validated, while the EL is still syncing.
|
||||
// This function returns an error if the block is not found in the fork choice
|
||||
// store
|
||||
func (f *ForkChoice) IsOptimistic(ctx context.Context, root [32]byte) (bool, error) {
|
||||
if ctx.Err() != nil {
|
||||
return false, ctx.Err()
|
||||
}
|
||||
func (f *ForkChoice) IsOptimistic(root [32]byte) (bool, error) {
|
||||
f.store.nodesLock.RLock()
|
||||
defer f.store.nodesLock.RUnlock()
|
||||
index, ok := f.store.nodesIndices[root]
|
||||
if !ok {
|
||||
f.store.nodesLock.RUnlock()
|
||||
return false, ErrUnknownNodeRoot
|
||||
}
|
||||
node := f.store.nodes[index]
|
||||
slot := node.slot
|
||||
|
||||
// If the node is a synced tip, then it's fully validated
|
||||
f.syncedTips.RLock()
|
||||
_, ok = f.syncedTips.validatedTips[root]
|
||||
if ok {
|
||||
f.syncedTips.RUnlock()
|
||||
f.store.nodesLock.RUnlock()
|
||||
return false, nil
|
||||
}
|
||||
f.syncedTips.RUnlock()
|
||||
|
||||
// If the slot is higher than the max synced tip, it's optimistic
|
||||
min, max := f.boundarySyncedTips()
|
||||
if slot > max {
|
||||
f.store.nodesLock.RUnlock()
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// If the slot is lower than the min synced tip, it's fully validated
|
||||
if slot <= min {
|
||||
f.store.nodesLock.RUnlock()
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// if the node is a leaf of the Fork Choice tree, then it's
|
||||
// optimistic
|
||||
childIndex := node.BestChild()
|
||||
if childIndex == NonExistentNode {
|
||||
f.store.nodesLock.RUnlock()
|
||||
return true, nil
|
||||
}
|
||||
|
||||
// recurse to the child
|
||||
child := f.store.nodes[childIndex]
|
||||
root = child.root
|
||||
f.store.nodesLock.RUnlock()
|
||||
return f.IsOptimistic(ctx, root)
|
||||
}
|
||||
|
||||
// This function returns the index of the synced tip node that is an ancestor of the input node.
|
||||
// In the event of none, `NonExistentNode` is returned.
|
||||
// This internal method assumes the caller holds a lock on syncedTips and s.nodesLock
|
||||
func (s *Store) findSyncedTip(ctx context.Context, node *Node, syncedTips *optimisticStore) (uint64, error) {
|
||||
for {
|
||||
if ctx.Err() != nil {
|
||||
return 0, ctx.Err()
|
||||
}
|
||||
if _, ok := syncedTips.validatedTips[node.root]; ok {
|
||||
return s.nodesIndices[node.root], nil
|
||||
}
|
||||
if node.parent == NonExistentNode {
|
||||
return NonExistentNode, nil
|
||||
}
|
||||
node = s.nodes[node.parent]
|
||||
}
|
||||
return node.status == syncing, nil
|
||||
}
|
||||
|
||||
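After this change a fork choice node carries its own validation status, so the rewritten IsOptimistic above no longer needs a context or a walk through synced tips: it reduces to checking whether the node's status is still syncing. A hedged usage sketch (forkChoicer and blockRoot are placeholders):

    optimistic, err := forkChoicer.IsOptimistic(blockRoot)
    if err != nil {
        return err // ErrUnknownNodeRoot: the root was never inserted into fork choice
    }
    if optimistic {
        // The execution payload has not been validated by the EL yet;
        // downstream consumers should treat data derived from this block as provisional.
    }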
// SetOptimisticToValid is called with the root of a block that was returned as
|
||||
// VALID by the EL. This routine recomputes and updates the synced_tips map to
|
||||
// account for this new tip.
|
||||
// WARNING: This method returns an error if the root is not found in forkchoice or
|
||||
// if the root is not a leaf of the fork choice tree.
|
||||
// VALID by the EL.
|
||||
// WARNING: This method returns an error if the root is not found in forkchoice
|
||||
func (f *ForkChoice) SetOptimisticToValid(ctx context.Context, root [32]byte) error {
|
||||
f.store.nodesLock.RLock()
|
||||
f.store.nodesLock.Lock()
|
||||
defer f.store.nodesLock.Unlock()
|
||||
// We can only update if given root is in Fork Choice
|
||||
index, ok := f.store.nodesIndices[root]
|
||||
if !ok {
|
||||
return errInvalidNodeIndex
|
||||
return ErrUnknownNodeRoot
|
||||
}
|
||||
node := f.store.nodes[index]
|
||||
f.store.nodesLock.RUnlock()
|
||||
|
||||
// Stop early if the node is Valid
|
||||
optimistic, err := f.IsOptimistic(ctx, root)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !optimistic {
|
||||
return nil
|
||||
}
|
||||
f.store.nodesLock.RLock()
|
||||
defer f.store.nodesLock.RUnlock()
|
||||
|
||||
// Cache root and slot to validated tips
|
||||
newTips := make(map[[32]byte]types.Slot)
|
||||
newValidSlot := node.slot
|
||||
newTips[root] = newValidSlot
|
||||
|
||||
// Compute the full valid path from the given node to its previous synced tip
|
||||
// This path will now consist of fully validated blocks. Notice that
|
||||
// the previous tip may have been outside the Fork Choice store.
|
||||
// In this case, only one block can be in syncedTips as the whole
|
||||
// Fork Choice would be a descendant of this block.
|
||||
validPath := make(map[uint64]bool)
|
||||
validPath[index] = true
|
||||
for {
|
||||
for node := f.store.nodes[index]; node.status == syncing; node = f.store.nodes[index] {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
|
||||
parentIndex := node.parent
|
||||
if parentIndex == NonExistentNode {
|
||||
node.status = valid
|
||||
index = node.parent
|
||||
if index == NonExistentNode {
|
||||
break
|
||||
}
|
||||
if parentIndex >= uint64(len(f.store.nodes)) {
|
||||
return errInvalidNodeIndex
|
||||
}
|
||||
node = f.store.nodes[parentIndex]
|
||||
_, ok = f.syncedTips.validatedTips[node.root]
|
||||
if ok {
|
||||
break
|
||||
}
|
||||
validPath[parentIndex] = true
|
||||
validatedNodesCount.Inc()
|
||||
}
|
||||
|
||||
// Retrieve the list of leaves in the Fork Choice
|
||||
// These are all the nodes that have NonExistentNode as best child.
|
||||
leaves, err := f.store.leaves()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// For each leaf, recompute the new tip.
|
||||
for _, i := range leaves {
|
||||
node = f.store.nodes[i]
|
||||
j := i
|
||||
for {
|
||||
if ctx.Err() != nil {
|
||||
return ctx.Err()
|
||||
}
|
||||
// Stop if we reached the previous tip
|
||||
_, ok = f.syncedTips.validatedTips[node.root]
|
||||
if ok {
|
||||
newTips[node.root] = node.slot
|
||||
break
|
||||
}
|
||||
|
||||
// Stop if we reach valid path
|
||||
_, ok = validPath[j]
|
||||
if ok {
|
||||
newTips[node.root] = node.slot
|
||||
break
|
||||
}
|
||||
|
||||
j = node.parent
|
||||
if j == NonExistentNode {
|
||||
break
|
||||
}
|
||||
if j >= uint64(len(f.store.nodes)) {
|
||||
return errInvalidNodeIndex
|
||||
}
|
||||
node = f.store.nodes[j]
|
||||
}
|
||||
}
|
||||
|
||||
f.syncedTips.validatedTips = newTips
|
||||
lastSyncedTipSlot.Set(float64(newValidSlot))
|
||||
syncedTipsCount.Set(float64(len(newTips)))
|
||||
return nil
|
||||
}
|
||||
|
||||
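The new SetOptimisticToValid body above replaces the synced-tips recomputation with a single walk up the parent chain, flipping every still-syncing ancestor to valid and stopping at the first ancestor that is already valid. A stripped-down sketch of that loop shape (Node, syncing, valid and NonExistentNode mirror the unexported names in this package):

    // markAncestorsValid flips node and its syncing ancestors to valid.
    // The first non-syncing ancestor ends the walk, since everything above it is already valid.
    func markAncestorsValid(nodes []*Node, index uint64) {
        for node := nodes[index]; node.status == syncing; node = nodes[index] {
            node.status = valid
            index = node.parent
            if index == NonExistentNode {
                break
            }
        }
    }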
// SetOptimisticToInvalid updates the synced_tips map when the block with the given root becomes INVALID.
|
||||
func (f *ForkChoice) SetOptimisticToInvalid(ctx context.Context, root [32]byte) ([][32]byte, error) {
|
||||
// SetOptimisticToInvalid updates the synced_tips map when the block with the given root becomes INVALID.
|
||||
// It takes two parameters: the root of the INVALID block and the payload Hash
|
||||
// of the last valid block.
|
||||
func (f *ForkChoice) SetOptimisticToInvalid(ctx context.Context, root, payloadHash [32]byte) ([][32]byte, error) {
|
||||
f.store.nodesLock.Lock()
|
||||
defer f.store.nodesLock.Unlock()
|
||||
invalidRoots := make([][32]byte, 0)
|
||||
idx, ok := f.store.nodesIndices[root]
|
||||
// We only support marking as invalid a node that already exists in fork choice
|
||||
invalidIndex, ok := f.store.nodesIndices[root]
|
||||
if !ok {
|
||||
return invalidRoots, errInvalidNodeIndex
|
||||
return invalidRoots, ErrUnknownNodeRoot
|
||||
}
|
||||
node := f.store.nodes[idx]
|
||||
// We only support changing status for the tips in Fork Choice store.
|
||||
if node.bestChild != NonExistentNode {
|
||||
return invalidRoots, errInvalidNodeIndex
|
||||
node := f.store.nodes[invalidIndex]
|
||||
|
||||
lastValidIndex, ok := f.store.payloadIndices[payloadHash]
|
||||
if !ok || lastValidIndex == NonExistentNode {
|
||||
return invalidRoots, errInvalidFinalizedNode
|
||||
}
|
||||
|
||||
parentIndex := node.parent
|
||||
// This should not happen
|
||||
if parentIndex == NonExistentNode {
|
||||
return invalidRoots, errInvalidNodeIndex
|
||||
// Check if the last valid hash is an ancestor of the passed node
|
||||
firstInvalidIndex := node.parent
|
||||
for ; firstInvalidIndex != NonExistentNode && firstInvalidIndex != lastValidIndex; firstInvalidIndex = node.parent {
|
||||
node = f.store.nodes[firstInvalidIndex]
|
||||
}
|
||||
// Update the weights of the nodes subtracting the INVALID node's weight
|
||||
|
||||
// if the last valid hash is not an ancestor of the invalid block, we
|
||||
// just remove the invalid block.
|
||||
if node.parent != lastValidIndex {
|
||||
node = f.store.nodes[invalidIndex]
|
||||
firstInvalidIndex = invalidIndex
|
||||
lastValidIndex = node.parent
|
||||
if lastValidIndex == NonExistentNode {
|
||||
return invalidRoots, errInvalidFinalizedNode
|
||||
}
|
||||
} else {
|
||||
firstInvalidIndex = f.store.nodesIndices[node.root]
|
||||
}
|
||||
|
||||
// Update the weights of the nodes subtracting the first INVALID node's weight
|
||||
weight := node.weight
|
||||
node = f.store.nodes[parentIndex]
|
||||
for {
|
||||
if ctx.Err() != nil {
|
||||
return invalidRoots, ctx.Err()
|
||||
}
|
||||
node.weight -= weight
|
||||
if node.parent == NonExistentNode {
|
||||
break
|
||||
}
|
||||
node = f.store.nodes[node.parent]
|
||||
}
|
||||
parent := copyNode(f.store.nodes[parentIndex])
|
||||
|
||||
// delete the invalid node, order is important
|
||||
f.store.nodes = append(f.store.nodes[:idx], f.store.nodes[idx+1:]...)
|
||||
delete(f.store.nodesIndices, root)
|
||||
invalidRoots = append(invalidRoots, root)
|
||||
// Fix parent and best child for each node
|
||||
for _, node := range f.store.nodes {
|
||||
if node.parent == NonExistentNode {
|
||||
node.parent = NonExistentNode
|
||||
} else if node.parent > idx {
|
||||
node.parent -= 1
|
||||
}
|
||||
if node.bestChild == NonExistentNode || node.bestChild == idx {
|
||||
node.bestChild = NonExistentNode
|
||||
} else if node.bestChild > idx {
|
||||
node.bestChild -= 1
|
||||
}
|
||||
if node.bestDescendant == NonExistentNode || node.bestDescendant == idx {
|
||||
node.bestDescendant = NonExistentNode
|
||||
} else if node.bestDescendant > idx {
|
||||
node.bestDescendant -= 1
|
||||
}
|
||||
var validNode *Node
|
||||
for index := lastValidIndex; index != NonExistentNode; index = validNode.parent {
|
||||
validNode = f.store.nodes[index]
|
||||
validNode.weight -= weight
|
||||
}
|
||||
|
||||
// Update the parent's best child and best descendant if necessary.
|
||||
if parent.bestChild == idx || parent.bestDescendant == idx {
|
||||
for childIndex, child := range f.store.nodes {
|
||||
if child.parent == parentIndex {
|
||||
err := f.store.updateBestChildAndDescendant(
|
||||
parentIndex, uint64(childIndex))
|
||||
if err != nil {
|
||||
return invalidRoots, err
|
||||
}
|
||||
break
|
||||
// Find the current proposer boost (it should be set to zero if an
|
||||
// INVALID block was boosted)
|
||||
f.store.proposerBoostLock.RLock()
|
||||
boostRoot := f.store.proposerBoostRoot
|
||||
previousBoostRoot := f.store.previousProposerBoostRoot
|
||||
f.store.proposerBoostLock.RUnlock()
|
||||
|
||||
// Remove the invalid roots from our store maps and adjust their weight
|
||||
// to zero
|
||||
boosted := node.root == boostRoot
|
||||
previouslyBoosted := node.root == previousBoostRoot
|
||||
|
||||
invalidIndices := map[uint64]bool{firstInvalidIndex: true}
|
||||
node.status = invalid
|
||||
node.weight = 0
|
||||
delete(f.store.nodesIndices, node.root)
|
||||
delete(f.store.canonicalNodes, node.root)
|
||||
delete(f.store.payloadIndices, node.payloadHash)
|
||||
for index := firstInvalidIndex + 1; index < uint64(len(f.store.nodes)); index++ {
|
||||
invalidNode := f.store.nodes[index]
|
||||
if _, ok := invalidIndices[invalidNode.parent]; !ok {
|
||||
continue
|
||||
}
|
||||
if invalidNode.status == valid {
|
||||
return invalidRoots, errInvalidOptimisticStatus
|
||||
}
|
||||
if !boosted && invalidNode.root == boostRoot {
|
||||
boosted = true
|
||||
}
|
||||
if !previouslyBoosted && invalidNode.root == previousBoostRoot {
|
||||
previouslyBoosted = true
|
||||
}
|
||||
invalidNode.status = invalid
|
||||
invalidIndices[index] = true
|
||||
invalidNode.weight = 0
|
||||
delete(f.store.nodesIndices, invalidNode.root)
|
||||
delete(f.store.canonicalNodes, invalidNode.root)
|
||||
delete(f.store.payloadIndices, invalidNode.payloadHash)
|
||||
}
|
||||
if boosted {
|
||||
if err := f.ResetBoostedProposerRoot(ctx); err != nil {
|
||||
return invalidRoots, err
|
||||
}
|
||||
}
|
||||
if previouslyBoosted {
|
||||
f.store.proposerBoostLock.Lock()
|
||||
f.store.previousProposerBoostRoot = params.BeaconConfig().ZeroHash
|
||||
f.store.previousProposerBoostScore = 0
|
||||
f.store.proposerBoostLock.Unlock()
|
||||
}
|
||||
|
||||
for index := range invalidIndices {
|
||||
invalidRoots = append(invalidRoots, f.store.nodes[index].root)
|
||||
}
|
||||
|
||||
// Update the best child and descendant
|
||||
for i := len(f.store.nodes) - 1; i >= 0; i-- {
|
||||
n := f.store.nodes[i]
|
||||
if n.parent != NonExistentNode {
|
||||
if err := f.store.updateBestChildAndDescendant(n.parent, uint64(i)); err != nil {
|
||||
return invalidRoots, err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Return early if the parent is not a synced_tip.
|
||||
f.syncedTips.Lock()
|
||||
defer f.syncedTips.Unlock()
|
||||
parentRoot := parent.root
|
||||
_, ok = f.syncedTips.validatedTips[parentRoot]
|
||||
if !ok {
|
||||
return invalidRoots, nil
|
||||
}
|
||||
|
||||
leaves, err := f.store.leaves()
|
||||
if err != nil {
|
||||
return invalidRoots, err
|
||||
}
|
||||
|
||||
for _, i := range leaves {
|
||||
node = f.store.nodes[i]
|
||||
for {
|
||||
if ctx.Err() != nil {
|
||||
return invalidRoots, ctx.Err()
|
||||
}
|
||||
|
||||
// Return early if the parent is still a synced tip
|
||||
if node.root == parentRoot {
|
||||
return invalidRoots, nil
|
||||
}
|
||||
_, ok = f.syncedTips.validatedTips[node.root]
|
||||
if ok {
|
||||
break
|
||||
}
|
||||
if node.parent == NonExistentNode {
|
||||
break
|
||||
}
|
||||
node = f.store.nodes[node.parent]
|
||||
}
|
||||
}
|
||||
delete(f.syncedTips.validatedTips, parentRoot)
|
||||
syncedTipsCount.Set(float64(len(f.syncedTips.validatedTips)))
|
||||
return invalidRoots, nil
|
||||
}
|
||||
|
||||
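Taken as a whole, the rewritten SetOptimisticToInvalid marks the subtree rooted at the first invalid ancestor above the last valid payload hash as invalid, zeroes those nodes' weights, subtracts the removed weight from every remaining ancestor, clears any proposer boost pointing into the subtree, and recomputes best child/descendant pointers. A hedged call-site sketch (invalidBlockRoot, lastValidPayloadHash and pruneBlock are placeholders):

    invalidRoots, err := forkChoicer.SetOptimisticToInvalid(ctx, invalidBlockRoot, lastValidPayloadHash)
    if err != nil {
        // ErrUnknownNodeRoot if the block is not in fork choice,
        // errInvalidFinalizedNode if the last valid payload hash cannot be resolved.
        return err
    }
    // Callers are expected to drop these blocks from their own views of the chain.
    for _, root := range invalidRoots {
        pruneBlock(root) // hypothetical caller-side helper
    }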
@@ -10,116 +10,38 @@ import (
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
)
|
||||
|
||||
// We test the algorithm to check the optimistic status of a node. The
|
||||
// status for this test is the following branching diagram
|
||||
//
|
||||
// -- E -- F
|
||||
// /
|
||||
// -- C -- D
|
||||
// /
|
||||
// 0 -- 1 -- A -- B -- J -- K
|
||||
// \ /
|
||||
// -- G -- H -- I
|
||||
//
|
||||
// Here nodes 0, 1, A, B, C, D are fully validated and nodes
|
||||
// E, F, G, H, J, K are optimistic.
|
||||
// Synced Tips are nodes B, C, D
|
||||
// nodes 0 and 1 are outside the Fork Choice Store.
|
||||
func slicesEqual(a, b [][32]byte) bool {
|
||||
if len(a) != len(b) {
|
||||
return false
|
||||
}
|
||||
|
||||
func TestOptimistic(t *testing.T) {
|
||||
mapA := make(map[[32]byte]bool, len(a))
|
||||
for _, root := range a {
|
||||
mapA[root] = true
|
||||
}
|
||||
for _, root := range b {
|
||||
_, ok := mapA[root]
|
||||
if !ok {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func TestOptimistic_Outside_ForkChoice(t *testing.T) {
|
||||
root0 := bytesutil.ToBytes32([]byte("hello0"))
|
||||
root1 := bytesutil.ToBytes32([]byte("hello1"))
|
||||
|
||||
nodeA := &Node{
|
||||
slot: types.Slot(100),
|
||||
root: bytesutil.ToBytes32([]byte("helloA")),
|
||||
bestChild: 1,
|
||||
}
|
||||
nodeB := &Node{
|
||||
slot: types.Slot(101),
|
||||
root: bytesutil.ToBytes32([]byte("helloB")),
|
||||
bestChild: 2,
|
||||
parent: 0,
|
||||
}
|
||||
nodeC := &Node{
|
||||
slot: types.Slot(102),
|
||||
root: bytesutil.ToBytes32([]byte("helloC")),
|
||||
bestChild: 3,
|
||||
parent: 1,
|
||||
}
|
||||
nodeD := &Node{
|
||||
slot: types.Slot(103),
|
||||
root: bytesutil.ToBytes32([]byte("helloD")),
|
||||
bestChild: NonExistentNode,
|
||||
parent: 2,
|
||||
}
|
||||
nodeE := &Node{
|
||||
slot: types.Slot(103),
|
||||
root: bytesutil.ToBytes32([]byte("helloE")),
|
||||
bestChild: 5,
|
||||
parent: 2,
|
||||
}
|
||||
nodeF := &Node{
|
||||
slot: types.Slot(104),
|
||||
root: bytesutil.ToBytes32([]byte("helloF")),
|
||||
bestChild: NonExistentNode,
|
||||
parent: 4,
|
||||
}
|
||||
nodeG := &Node{
|
||||
slot: types.Slot(102),
|
||||
root: bytesutil.ToBytes32([]byte("helloG")),
|
||||
bestChild: 7,
|
||||
parent: 1,
|
||||
}
|
||||
nodeH := &Node{
|
||||
slot: types.Slot(103),
|
||||
root: bytesutil.ToBytes32([]byte("helloH")),
|
||||
bestChild: 8,
|
||||
parent: 6,
|
||||
}
|
||||
nodeI := &Node{
|
||||
slot: types.Slot(104),
|
||||
root: bytesutil.ToBytes32([]byte("helloI")),
|
||||
bestChild: NonExistentNode,
|
||||
parent: 7,
|
||||
}
|
||||
nodeJ := &Node{
|
||||
slot: types.Slot(103),
|
||||
root: bytesutil.ToBytes32([]byte("helloJ")),
|
||||
bestChild: 10,
|
||||
parent: 6,
|
||||
}
|
||||
nodeK := &Node{
|
||||
slot: types.Slot(104),
|
||||
root: bytesutil.ToBytes32([]byte("helloK")),
|
||||
bestChild: NonExistentNode,
|
||||
parent: 9,
|
||||
status: valid,
|
||||
}
|
||||
nodes := []*Node{
|
||||
nodeA,
|
||||
nodeB,
|
||||
nodeC,
|
||||
nodeD,
|
||||
nodeE,
|
||||
nodeF,
|
||||
nodeG,
|
||||
nodeH,
|
||||
nodeI,
|
||||
nodeJ,
|
||||
nodeK,
|
||||
}
|
||||
ni := map[[32]byte]uint64{
|
||||
nodeA.root: 0,
|
||||
nodeB.root: 1,
|
||||
nodeC.root: 2,
|
||||
nodeD.root: 3,
|
||||
nodeE.root: 4,
|
||||
nodeF.root: 5,
|
||||
nodeG.root: 6,
|
||||
nodeH.root: 7,
|
||||
nodeI.root: 8,
|
||||
nodeJ.root: 9,
|
||||
nodeK.root: 10,
|
||||
}
|
||||
|
||||
s := &Store{
|
||||
@@ -127,82 +49,14 @@ func TestOptimistic(t *testing.T) {
|
||||
nodesIndices: ni,
|
||||
}
|
||||
|
||||
tips := map[[32]byte]types.Slot{
|
||||
nodeB.root: nodeB.slot,
|
||||
nodeC.root: nodeC.slot,
|
||||
nodeD.root: nodeD.slot,
|
||||
}
|
||||
st := &optimisticStore{
|
||||
validatedTips: tips,
|
||||
}
|
||||
f := &ForkChoice{
|
||||
store: s,
|
||||
syncedTips: st,
|
||||
store: s,
|
||||
}
|
||||
ctx := context.Background()
|
||||
// We test the implementation of boundarySyncedTips
|
||||
min, max := f.boundarySyncedTips()
|
||||
require.Equal(t, min, types.Slot(101), "minimum tip slot is different")
|
||||
require.Equal(t, max, types.Slot(103), "maximum tip slot is different")
|
||||
|
||||
// We test first nodes outside the Fork Choice store
|
||||
_, err := f.IsOptimistic(ctx, root0)
|
||||
_, err := f.IsOptimistic(root0)
|
||||
require.ErrorIs(t, ErrUnknownNodeRoot, err)
|
||||
|
||||
_, err = f.IsOptimistic(ctx, root1)
|
||||
require.ErrorIs(t, ErrUnknownNodeRoot, err)
|
||||
|
||||
// We check all nodes in the Fork Choice store.
|
||||
op, err := f.IsOptimistic(ctx, nodeA.root)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, op, false)
|
||||
|
||||
op, err = f.IsOptimistic(ctx, nodeB.root)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, op, false)
|
||||
|
||||
op, err = f.IsOptimistic(ctx, nodeC.root)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, op, false)
|
||||
|
||||
op, err = f.IsOptimistic(ctx, nodeD.root)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, op, false)
|
||||
|
||||
op, err = f.IsOptimistic(ctx, nodeE.root)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, op, true)
|
||||
|
||||
op, err = f.IsOptimistic(ctx, nodeF.root)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, op, true)
|
||||
|
||||
op, err = f.IsOptimistic(ctx, nodeG.root)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, op, true)
|
||||
|
||||
op, err = f.IsOptimistic(ctx, nodeH.root)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, op, true)
|
||||
|
||||
op, err = f.IsOptimistic(ctx, nodeI.root)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, op, true)
|
||||
|
||||
op, err = f.IsOptimistic(ctx, nodeJ.root)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, op, true)
|
||||
|
||||
op, err = f.IsOptimistic(ctx, nodeK.root)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, op, true)
|
||||
|
||||
// request a write Lock to synced Tips regression #10289
|
||||
f.syncedTips.Lock()
|
||||
defer f.syncedTips.Unlock()
|
||||
}
|
||||
|
||||
// This tests the algorithm to update syncedTips
|
||||
// This tests the algorithm to update the optimistic status
|
||||
// We start with the following diagram
|
||||
//
|
||||
// E -- F
|
||||
@@ -213,165 +67,105 @@ func TestOptimistic(t *testing.T) {
|
||||
// \ \
|
||||
// J -- K -- L
|
||||
//
|
||||
// And every block in the Fork choice is optimistic. Synced_Tips contains a
|
||||
// single block that is outside of Fork choice
|
||||
// The chain A -- B -- C -- D -- E is VALID.
|
||||
//
|
||||
func TestSetOptimisticToValid(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
f := setup(1, 1)
|
||||
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
tests := []struct {
|
||||
root [32]byte // the root of the new VALID block
|
||||
tips map[[32]byte]types.Slot // the old synced tips
|
||||
newTips map[[32]byte]types.Slot // the updated synced tips
|
||||
wantedErr error
|
||||
root [32]byte // the root of the new VALID block
|
||||
testRoot [32]byte // root of the node we will test optimistic status
|
||||
wantedOptimistic bool // wanted optimistic status for tested node
|
||||
wantedErr error // wanted error message
|
||||
}{
|
||||
{
|
||||
[32]byte{'i'},
|
||||
map[[32]byte]types.Slot{[32]byte{'z'}: 90},
|
||||
map[[32]byte]types.Slot{
|
||||
[32]byte{'b'}: 101,
|
||||
[32]byte{'d'}: 103,
|
||||
[32]byte{'g'}: 104,
|
||||
[32]byte{'i'}: 106,
|
||||
},
|
||||
[32]byte{'i'},
|
||||
false,
|
||||
nil,
|
||||
},
|
||||
{
|
||||
[32]byte{'i'},
|
||||
map[[32]byte]types.Slot{
|
||||
[32]byte{'b'}: 101,
|
||||
[32]byte{'d'}: 103,
|
||||
},
|
||||
map[[32]byte]types.Slot{
|
||||
[32]byte{'b'}: 101,
|
||||
[32]byte{'d'}: 103,
|
||||
[32]byte{'g'}: 104,
|
||||
[32]byte{'i'}: 106,
|
||||
},
|
||||
[32]byte{'f'},
|
||||
true,
|
||||
nil,
|
||||
},
|
||||
{
|
||||
[32]byte{'i'},
|
||||
map[[32]byte]types.Slot{
|
||||
[32]byte{'b'}: 101,
|
||||
[32]byte{'d'}: 103,
|
||||
[32]byte{'e'}: 103,
|
||||
},
|
||||
map[[32]byte]types.Slot{
|
||||
[32]byte{'b'}: 101,
|
||||
[32]byte{'e'}: 104,
|
||||
[32]byte{'g'}: 104,
|
||||
[32]byte{'i'}: 106,
|
||||
},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
[32]byte{'j'},
|
||||
map[[32]byte]types.Slot{
|
||||
[32]byte{'b'}: 101,
|
||||
[32]byte{'f'}: 105,
|
||||
[32]byte{'g'}: 104,
|
||||
[32]byte{'i'}: 106,
|
||||
},
|
||||
map[[32]byte]types.Slot{
|
||||
[32]byte{'f'}: 105,
|
||||
[32]byte{'g'}: 104,
|
||||
[32]byte{'i'}: 106,
|
||||
[32]byte{'j'}: 102,
|
||||
},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
[32]byte{'g'},
|
||||
map[[32]byte]types.Slot{
|
||||
[32]byte{'b'}: 101,
|
||||
[32]byte{'f'}: 105,
|
||||
[32]byte{'g'}: 104,
|
||||
[32]byte{'i'}: 106,
|
||||
},
|
||||
map[[32]byte]types.Slot{
|
||||
[32]byte{'b'}: 101,
|
||||
[32]byte{'f'}: 105,
|
||||
[32]byte{'g'}: 104,
|
||||
[32]byte{'i'}: 106,
|
||||
},
|
||||
[32]byte{'b'},
|
||||
false,
|
||||
nil,
|
||||
},
|
||||
{
|
||||
[32]byte{'i'},
|
||||
[32]byte{'h'},
|
||||
map[[32]byte]types.Slot{
|
||||
[32]byte{'z'}: 90,
|
||||
},
|
||||
map[[32]byte]types.Slot{
|
||||
[32]byte{'b'}: 101,
|
||||
[32]byte{'d'}: 103,
|
||||
[32]byte{'g'}: 104,
|
||||
[32]byte{'h'}: 105,
|
||||
},
|
||||
false,
|
||||
nil,
|
||||
},
|
||||
{
|
||||
[32]byte{'b'},
|
||||
[32]byte{'b'},
|
||||
false,
|
||||
nil,
|
||||
},
|
||||
{
|
||||
[32]byte{'b'},
|
||||
[32]byte{'h'},
|
||||
map[[32]byte]types.Slot{
|
||||
[32]byte{'b'}: 101,
|
||||
[32]byte{'d'}: 103,
|
||||
[32]byte{'g'}: 104,
|
||||
[32]byte{'i'}: 106,
|
||||
},
|
||||
map[[32]byte]types.Slot{
|
||||
[32]byte{'b'}: 101,
|
||||
[32]byte{'d'}: 103,
|
||||
[32]byte{'g'}: 104,
|
||||
[32]byte{'i'}: 106,
|
||||
},
|
||||
true,
|
||||
nil,
|
||||
},
|
||||
{
|
||||
[32]byte{'g'},
|
||||
map[[32]byte]types.Slot{
|
||||
[32]byte{'b'}: 101,
|
||||
[32]byte{'d'}: 103,
|
||||
[32]byte{'e'}: 104,
|
||||
},
|
||||
map[[32]byte]types.Slot{
|
||||
[32]byte{'b'}: 101,
|
||||
[32]byte{'e'}: 104,
|
||||
[32]byte{'g'}: 104,
|
||||
},
|
||||
[32]byte{'b'},
|
||||
[32]byte{'a'},
|
||||
false,
|
||||
nil,
|
||||
},
|
||||
{
|
||||
[32]byte{'k'},
|
||||
[32]byte{'k'},
|
||||
false,
|
||||
nil,
|
||||
},
|
||||
{
|
||||
[32]byte{'k'},
|
||||
[32]byte{'l'},
|
||||
true,
|
||||
nil,
|
||||
},
|
||||
{
|
||||
[32]byte{'p'},
|
||||
map[[32]byte]types.Slot{},
|
||||
map[[32]byte]types.Slot{},
|
||||
errInvalidNodeIndex,
|
||||
[32]byte{},
|
||||
false,
|
||||
ErrUnknownNodeRoot,
|
||||
},
|
||||
}
|
||||
for _, tc := range tests {
|
||||
f.syncedTips.Lock()
|
||||
f.syncedTips.validatedTips = tc.tips
|
||||
f.syncedTips.Unlock()
|
||||
err := f.SetOptimisticToValid(context.Background(), tc.root)
|
||||
f := setup(1, 1)
|
||||
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, [32]byte{'J'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, [32]byte{'E'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, [32]byte{'G'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, [32]byte{'F'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, [32]byte{'H'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, [32]byte{'K'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, [32]byte{'I'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, [32]byte{'L'}, 1, 1))
|
||||
require.NoError(t, f.SetOptimisticToValid(context.Background(), [32]byte{'e'}))
|
||||
optimistic, err := f.IsOptimistic([32]byte{'b'})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, false, optimistic)
|
||||
|
||||
err = f.SetOptimisticToValid(context.Background(), tc.root)
|
||||
if tc.wantedErr != nil {
|
||||
require.ErrorIs(t, err, tc.wantedErr)
|
||||
} else {
|
||||
require.NoError(t, err)
|
||||
f.syncedTips.RLock()
|
||||
require.DeepEqual(t, f.syncedTips.validatedTips, tc.newTips)
|
||||
f.syncedTips.RUnlock()
|
||||
optimistic, err := f.IsOptimistic(tc.testRoot)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tc.wantedOptimistic, optimistic)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -387,70 +181,81 @@ func TestSetOptimisticToValid(t *testing.T) {
|
||||
// \ \
|
||||
// J(1) -- K(1) -- L(0)
|
||||
//
|
||||
// And every block in the Fork choice is optimistic. Synced_Tips contains a
|
||||
// single block that is outside of Fork choice. The numbers in parentheses are
|
||||
// the weights of the nodes before removal
|
||||
// And the chain A -- B -- C -- D -- E has been fully validated. The numbers in parentheses are
|
||||
// the weights of the nodes.
|
||||
//
|
||||
func TestSetOptimisticToInvalid(t *testing.T) {
|
||||
tests := []struct {
|
||||
root [32]byte // the root of the new INVALID block
|
||||
tips map[[32]byte]types.Slot // the old synced tips
|
||||
wantedParentTip bool
|
||||
name string // test description
|
||||
root [32]byte // the root of the new INVALID block
|
||||
payload [32]byte // the payload of the last valid hash
|
||||
newBestChild uint64
|
||||
newBestDescendant uint64
|
||||
newParentWeight uint64
|
||||
returnedRoots [][32]byte
|
||||
}{
|
||||
{
|
||||
"Remove tip, parent was valid",
|
||||
[32]byte{'j'},
|
||||
map[[32]byte]types.Slot{
|
||||
[32]byte{'b'}: 101,
|
||||
[32]byte{'d'}: 103,
|
||||
[32]byte{'g'}: 104,
|
||||
},
|
||||
false,
|
||||
[32]byte{'B'},
|
||||
3,
|
||||
4,
|
||||
8,
|
||||
[][32]byte{[32]byte{'j'}},
|
||||
},
|
||||
{
|
||||
[32]byte{'j'},
|
||||
map[[32]byte]types.Slot{
|
||||
[32]byte{'b'}: 101,
|
||||
},
|
||||
true,
|
||||
3,
|
||||
4,
|
||||
12,
|
||||
8,
|
||||
[][32]byte{[32]byte{'j'}},
|
||||
},
|
||||
{
|
||||
"Remove tip, parent was optimistic",
|
||||
[32]byte{'i'},
|
||||
map[[32]byte]types.Slot{
|
||||
[32]byte{'b'}: 101,
|
||||
[32]byte{'d'}: 103,
|
||||
[32]byte{'g'}: 104,
|
||||
[32]byte{'h'}: 105,
|
||||
},
|
||||
true,
|
||||
[32]byte{'H'},
|
||||
NonExistentNode,
|
||||
NonExistentNode,
|
||||
1,
|
||||
[][32]byte{[32]byte{'i'}},
|
||||
},
|
||||
{
|
||||
"Remove tip, lvh is inner and valid",
|
||||
[32]byte{'i'},
|
||||
map[[32]byte]types.Slot{
|
||||
[32]byte{'b'}: 101,
|
||||
[32]byte{'d'}: 103,
|
||||
[32]byte{'g'}: 104,
|
||||
},
|
||||
false,
|
||||
[32]byte{'D'},
|
||||
6,
|
||||
8,
|
||||
3,
|
||||
[][32]byte{[32]byte{'g'}, [32]byte{'h'}, [32]byte{'k'}, [32]byte{'i'}, [32]byte{'l'}},
|
||||
},
|
||||
{
|
||||
"Remove inner, lvh is inner and optimistic",
|
||||
[32]byte{'h'},
|
||||
[32]byte{'G'},
|
||||
10,
|
||||
12,
|
||||
2,
|
||||
[][32]byte{[32]byte{'h'}, [32]byte{'i'}},
|
||||
},
|
||||
{
|
||||
"Remove tip, lvh is inner and optimistic",
|
||||
[32]byte{'l'},
|
||||
[32]byte{'G'},
|
||||
9,
|
||||
11,
|
||||
2,
|
||||
[][32]byte{[32]byte{'k'}, [32]byte{'l'}},
|
||||
},
|
||||
{
|
||||
"Remove tip, lvh is not an ancestor",
|
||||
[32]byte{'j'},
|
||||
[32]byte{'C'},
|
||||
5,
|
||||
12,
|
||||
7,
|
||||
[][32]byte{[32]byte{'j'}},
|
||||
},
|
||||
{
|
||||
"Remove inner, lvh is not an ancestor",
|
||||
[32]byte{'g'},
|
||||
[32]byte{'J'},
|
||||
NonExistentNode,
|
||||
NonExistentNode,
|
||||
1,
|
||||
[][32]byte{[32]byte{'i'}},
|
||||
[][32]byte{[32]byte{'g'}, [32]byte{'h'}, [32]byte{'k'}, [32]byte{'i'}, [32]byte{'l'}},
|
||||
},
|
||||
}
|
||||
for _, tc := range tests {
|
||||
@@ -458,184 +263,71 @@ func TestSetOptimisticToInvalid(t *testing.T) {
|
||||
f := setup(1, 1)
|
||||
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, [32]byte{'J'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, [32]byte{'D'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, [32]byte{'E'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, [32]byte{'G'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, [32]byte{'F'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, [32]byte{'H'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, [32]byte{'K'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, [32]byte{'I'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, [32]byte{'L'}, 1, 1))
|
||||
weights := []uint64{10, 10, 9, 7, 1, 6, 2, 3, 1, 1, 1, 0, 0}
|
||||
f.syncedTips.Lock()
|
||||
f.syncedTips.validatedTips = tc.tips
|
||||
f.syncedTips.Unlock()
|
||||
f.store.nodesLock.Lock()
|
||||
for i, node := range f.store.nodes {
|
||||
node.weight = weights[i]
|
||||
}
|
||||
// Make j be the best child and descendant of b
|
||||
nodeB := f.store.nodes[2]
|
||||
nodeB.bestChild = 4
|
||||
nodeB.bestDescendant = 4
|
||||
idx := f.store.nodesIndices[tc.root]
|
||||
node := f.store.nodes[idx]
|
||||
parentIndex := node.parent
|
||||
require.NotEqual(t, NonExistentNode, parentIndex)
|
||||
parent := f.store.nodes[parentIndex]
|
||||
f.store.nodesLock.Unlock()
|
||||
roots, err := f.SetOptimisticToInvalid(context.Background(), tc.root)
|
||||
require.NoError(t, f.SetOptimisticToValid(ctx, [32]byte{'e'}))
|
||||
roots, err := f.SetOptimisticToInvalid(ctx, tc.root, tc.payload)
|
||||
require.NoError(t, err)
|
||||
require.DeepEqual(t, tc.returnedRoots, roots)
|
||||
f.syncedTips.RLock()
|
||||
_, parentSyncedTip := f.syncedTips.validatedTips[parent.root]
|
||||
f.syncedTips.RUnlock()
|
||||
require.Equal(t, tc.wantedParentTip, parentSyncedTip)
|
||||
require.Equal(t, tc.newBestChild, parent.bestChild)
|
||||
require.Equal(t, tc.newBestDescendant, parent.bestDescendant)
|
||||
require.Equal(t, tc.newParentWeight, parent.weight)
|
||||
}
|
||||
}
|
||||
|
||||
// This tests the algorithm to find the tip of a given node
|
||||
// We start with the following diagram
|
||||
//
|
||||
// E -- F
|
||||
// /
|
||||
// C -- D
|
||||
// / \
|
||||
// A -- B G -- H -- I
|
||||
// \ \
|
||||
// J -- K -- L
|
||||
//
|
||||
//
|
||||
func TestFindSyncedTip(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
f := setup(1, 1)
|
||||
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'c'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'j'}, [32]byte{'b'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'d'}, [32]byte{'c'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'e'}, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 104, [32]byte{'g'}, [32]byte{'d'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'f'}, [32]byte{'e'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'h'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
tests := []struct {
|
||||
root [32]byte // the root of the block
|
||||
tips map[[32]byte]types.Slot // the synced tips
|
||||
wanted [32]byte // the root of expected tip
|
||||
}{
|
||||
{
|
||||
[32]byte{'i'},
|
||||
map[[32]byte]types.Slot{
|
||||
[32]byte{'b'}: 101,
|
||||
[32]byte{'d'}: 103,
|
||||
[32]byte{'g'}: 104,
|
||||
},
|
||||
[32]byte{'g'},
|
||||
},
|
||||
{
|
||||
[32]byte{'g'},
|
||||
map[[32]byte]types.Slot{
|
||||
[32]byte{'b'}: 101,
|
||||
[32]byte{'d'}: 103,
|
||||
[32]byte{'h'}: 104,
|
||||
[32]byte{'k'}: 106,
|
||||
},
|
||||
[32]byte{'d'},
|
||||
},
|
||||
{
|
||||
[32]byte{'e'},
|
||||
map[[32]byte]types.Slot{
|
||||
[32]byte{'b'}: 101,
|
||||
[32]byte{'d'}: 103,
|
||||
[32]byte{'g'}: 103,
|
||||
},
|
||||
[32]byte{'d'},
|
||||
},
|
||||
{
|
||||
[32]byte{'j'},
|
||||
map[[32]byte]types.Slot{
|
||||
[32]byte{'b'}: 101,
|
||||
[32]byte{'f'}: 105,
|
||||
[32]byte{'g'}: 104,
|
||||
[32]byte{'i'}: 106,
|
||||
},
|
||||
[32]byte{'b'},
|
||||
},
|
||||
{
|
||||
[32]byte{'g'},
|
||||
map[[32]byte]types.Slot{
|
||||
[32]byte{'b'}: 101,
|
||||
[32]byte{'f'}: 105,
|
||||
[32]byte{'g'}: 104,
|
||||
[32]byte{'i'}: 106,
|
||||
},
|
||||
[32]byte{'g'},
|
||||
},
|
||||
}
|
||||
for _, tc := range tests {
|
||||
f.store.nodesLock.RLock()
|
||||
node := f.store.nodes[f.store.nodesIndices[tc.root]]
|
||||
syncedTips := &optimisticStore{
|
||||
validatedTips: tc.tips,
|
||||
}
|
||||
syncedTips.RLock()
|
||||
idx, err := f.store.findSyncedTip(ctx, node, syncedTips)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, tc.wanted, f.store.nodes[idx].root)
|
||||
|
||||
_, ok := f.store.nodesIndices[tc.root]
|
||||
require.Equal(t, false, ok)
|
||||
lvh := f.store.nodes[f.store.payloadIndices[tc.payload]]
|
||||
require.Equal(t, true, slicesEqual(tc.returnedRoots, roots))
|
||||
require.Equal(t, tc.newBestChild, lvh.bestChild)
|
||||
require.Equal(t, tc.newBestDescendant, lvh.bestDescendant)
|
||||
require.Equal(t, tc.newParentWeight, lvh.weight)
|
||||
require.Equal(t, syncing, f.store.nodes[8].status /* F */)
|
||||
require.Equal(t, valid, f.store.nodes[5].status /* E */)
|
||||
f.store.nodesLock.RUnlock()
|
||||
syncedTips.RUnlock()
|
||||
}
|
||||
}
|
||||
|
||||
// This is a regression test (10341)
|
||||
func TestIsOptimistic_DeadLock(t *testing.T) {
|
||||
func TestSetOptimisticToInvalid_InvalidRoots(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
f := setup(1, 1)
|
||||
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 90, [32]byte{'b'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'c'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 102, [32]byte{'d'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 103, [32]byte{'e'}, params.BeaconConfig().ZeroHash, params.BeaconConfig().ZeroHash, 1, 1))
|
||||
tips := map[[32]byte]types.Slot{
|
||||
[32]byte{'a'}: 100,
|
||||
[32]byte{'d'}: 102,
|
||||
}
|
||||
f.syncedTips.validatedTips = tips
|
||||
_, err := f.IsOptimistic(ctx, [32]byte{'a'})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Acquire a write lock, this should not hang
|
||||
f.store.nodesLock.Lock()
|
||||
f.store.nodesLock.Unlock()
|
||||
_, err = f.IsOptimistic(ctx, [32]byte{'e'})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Acquire a write lock, this should not hang
|
||||
f.store.nodesLock.Lock()
|
||||
f.store.nodesLock.Unlock()
|
||||
_, err = f.IsOptimistic(ctx, [32]byte{'b'})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Acquire a write lock, this should not hang
|
||||
f.store.nodesLock.Lock()
|
||||
f.store.nodesLock.Unlock()
|
||||
|
||||
_, err = f.IsOptimistic(ctx, [32]byte{'c'})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Acquire a write lock, this should not hang
|
||||
f.store.nodesLock.Lock()
|
||||
f.store.nodesLock.Unlock()
|
||||
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
|
||||
_, err := f.SetOptimisticToInvalid(ctx, [32]byte{'p'}, [32]byte{'B'})
|
||||
require.ErrorIs(t, ErrUnknownNodeRoot, err)
|
||||
_, err = f.SetOptimisticToInvalid(ctx, [32]byte{'a'}, [32]byte{'p'})
|
||||
require.ErrorIs(t, errInvalidFinalizedNode, err)
|
||||
}
|
||||
|
||||
// This is a regression test (10445)
|
||||
func TestSetOptimisticToInvalid_ProposerBoost(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
f := setup(1, 1)
|
||||
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 100, [32]byte{'a'}, params.BeaconConfig().ZeroHash, [32]byte{'A'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'b'}, [32]byte{'a'}, [32]byte{'B'}, 1, 1))
|
||||
require.NoError(t, f.InsertOptimisticBlock(ctx, 101, [32]byte{'c'}, [32]byte{'b'}, [32]byte{'C'}, 1, 1))
|
||||
f.store.proposerBoostLock.Lock()
|
||||
f.store.proposerBoostRoot = [32]byte{'c'}
|
||||
f.store.previousProposerBoostScore = 10
|
||||
f.store.previousProposerBoostRoot = [32]byte{'b'}
|
||||
f.store.proposerBoostLock.Unlock()
|
||||
|
||||
_, err := f.SetOptimisticToInvalid(ctx, [32]byte{'c'}, [32]byte{'A'})
|
||||
require.NoError(t, err)
|
||||
f.store.proposerBoostLock.RLock()
|
||||
require.Equal(t, uint64(0), f.store.previousProposerBoostScore)
|
||||
require.DeepEqual(t, [32]byte{}, f.store.proposerBoostRoot)
|
||||
require.DeepEqual(t, params.BeaconConfig().ZeroHash, f.store.previousProposerBoostRoot)
|
||||
f.store.proposerBoostLock.RUnlock()
|
||||
}
|
||||
|
||||
@@ -2,12 +2,10 @@ package protoarray
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
"github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/time/slots"
|
||||
)
|
||||
|
||||
// BoostProposerRoot sets the block root which should be boosted during
|
||||
@@ -19,17 +17,18 @@ import (
|
||||
// is_before_attesting_interval = time_into_slot < SECONDS_PER_SLOT // INTERVALS_PER_SLOT
|
||||
// if get_current_slot(store) == block.slot and is_before_attesting_interval:
|
||||
// store.proposer_boost_root = hash_tree_root(block)
|
||||
func (f *ForkChoice) BoostProposerRoot(_ context.Context, blockSlot types.Slot, blockRoot [32]byte, genesisTime time.Time) error {
|
||||
func (f *ForkChoice) BoostProposerRoot(_ context.Context, args *types.ProposerBoostRootArgs) error {
|
||||
if args == nil {
|
||||
return errors.New("nil function args provided to BoostProposerRoot")
|
||||
}
|
||||
secondsPerSlot := params.BeaconConfig().SecondsPerSlot
|
||||
timeIntoSlot := uint64(time.Since(genesisTime).Seconds()) % secondsPerSlot
|
||||
isBeforeAttestingInterval := timeIntoSlot < secondsPerSlot/params.BeaconConfig().IntervalsPerSlot
|
||||
currentSlot := slots.SinceGenesis(genesisTime)
|
||||
isBeforeAttestingInterval := args.SecondsIntoSlot < secondsPerSlot/params.BeaconConfig().IntervalsPerSlot
|
||||
|
||||
// Only update the boosted proposer root to the incoming block root
|
||||
// if the block is for the current, clock-based slot and the block was timely.
|
||||
if currentSlot == blockSlot && isBeforeAttestingInterval {
|
||||
if args.CurrentSlot == args.BlockSlot && isBeforeAttestingInterval {
|
||||
f.store.proposerBoostLock.Lock()
|
||||
f.store.proposerBoostRoot = blockRoot
|
||||
f.store.proposerBoostRoot = args.BlockRoot
|
||||
f.store.proposerBoostLock.Unlock()
|
||||
}
|
||||
return nil
|
||||
|
||||
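With mainnet parameters (SecondsPerSlot = 12, IntervalsPerSlot = 3) the attesting interval is 12 / 3 = 4 seconds, so the rewritten check boosts a block only when it belongs to the current clock slot and arrives within its first 4 seconds. A small worked sketch of the condition, with the slot values as placeholders:

    secondsPerSlot := uint64(12)  // params.BeaconConfig().SecondsPerSlot on mainnet
    intervalsPerSlot := uint64(3) // params.BeaconConfig().IntervalsPerSlot
    secondsIntoSlot := uint64(2)

    isBeforeAttestingInterval := secondsIntoSlot < secondsPerSlot/intervalsPerSlot // 2 < 4 → true
    shouldBoost := currentSlot == blockSlot && isBeforeAttestingInterval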
@@ -2,11 +2,11 @@ package protoarray
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
types "github.com/prysmaticlabs/eth2-types"
|
||||
forkchoicetypes "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types"
|
||||
"github.com/prysmaticlabs/prysm/config/params"
|
||||
"github.com/prysmaticlabs/prysm/testing/assert"
|
||||
"github.com/prysmaticlabs/prysm/testing/require"
|
||||
@@ -20,12 +20,17 @@ import (
|
||||
// If the honest proposal is boosted at slot n+2, it will win against this attacker.
|
||||
func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
ctx := context.Background()
|
||||
jEpoch, fEpoch := types.Epoch(0), types.Epoch(0)
|
||||
zeroHash := params.BeaconConfig().ZeroHash
|
||||
balances := make([]uint64, 64) // 64 active validators.
|
||||
for i := 0; i < len(balances); i++ {
|
||||
balances[i] = 10
|
||||
}
|
||||
jEpoch, fEpoch := types.Epoch(0), types.Epoch(0)
|
||||
t.Run("nil args check", func(t *testing.T) {
|
||||
f := setup(jEpoch, fEpoch)
|
||||
err := f.BoostProposerRoot(ctx, nil)
|
||||
require.ErrorContains(t, "nil function args", err)
|
||||
})
|
||||
t.Run("back-propagates boost score to ancestors after proposer boosting", func(t *testing.T) {
|
||||
f := setup(jEpoch, fEpoch)
|
||||
|
||||
@@ -88,8 +93,8 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
// 2
|
||||
// |
|
||||
// 3 <- HEAD
|
||||
slot = types.Slot(2)
|
||||
newRoot = indexToHash(2)
|
||||
slot = types.Slot(3)
|
||||
newRoot = indexToHash(3)
|
||||
require.NoError(t,
|
||||
f.InsertOptimisticBlock(
|
||||
ctx,
|
||||
@@ -128,15 +133,20 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
),
|
||||
)
|
||||
f.ProcessAttestation(ctx, []uint64{3}, newRoot, fEpoch)
|
||||
threeSlots := 3 * params.BeaconConfig().SecondsPerSlot
|
||||
genesisTime := time.Now().Add(-time.Second * time.Duration(threeSlots))
|
||||
require.NoError(t, f.BoostProposerRoot(ctx, slot, newRoot, genesisTime))
|
||||
clockSlot := types.Slot(3)
|
||||
args := &forkchoicetypes.ProposerBoostRootArgs{
|
||||
BlockRoot: newRoot,
|
||||
BlockSlot: slot,
|
||||
CurrentSlot: clockSlot,
|
||||
SecondsIntoSlot: 0,
|
||||
}
|
||||
require.NoError(t, f.BoostProposerRoot(ctx, args))
|
||||
headRoot, err = f.Head(ctx, jEpoch, zeroHash, balances, fEpoch)
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, newRoot, headRoot, "Incorrect head for justified epoch at slot 3")
|
||||
|
||||
// Check the ancestor scores from the store.
|
||||
require.Equal(t, 4, len(f.store.nodes))
|
||||
require.Equal(t, 5, len(f.store.nodes))
|
||||
|
||||
// Expect nodes to have a boosted, back-propagated score.
|
||||
// Ancestors have the added weights of their children. Genesis is a special exception at 0 weight,
|
||||
@@ -163,7 +173,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
// middle instead of the normal progression of (44 -> 34 -> 24).
|
||||
require.Equal(t, f.store.nodes[1].weight, uint64(54))
|
||||
require.Equal(t, f.store.nodes[2].weight, uint64(44))
|
||||
require.Equal(t, f.store.nodes[3].weight, uint64(24))
|
||||
require.Equal(t, f.store.nodes[3].weight, uint64(34))
|
||||
})
|
||||
t.Run("vanilla ex ante attack", func(t *testing.T) {
|
||||
f := setup(jEpoch, fEpoch)
|
||||
@@ -189,7 +199,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
honestBlockSlot,
|
||||
honestBlock,
|
||||
zeroHash,
|
||||
params.BeaconConfig().ZeroHash,
|
||||
zeroHash,
|
||||
jEpoch,
|
||||
fEpoch,
|
||||
),
|
||||
@@ -206,7 +216,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
maliciouslyWithheldBlockSlot,
|
||||
maliciouslyWithheldBlock,
|
||||
zeroHash,
|
||||
params.BeaconConfig().ZeroHash,
|
||||
zeroHash,
|
||||
jEpoch,
|
||||
fEpoch,
|
||||
),
|
||||
@@ -218,9 +228,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
|
||||
assert.Equal(t, honestBlock, r, "Incorrect head for justified epoch at slot 2")
|
||||
|
||||
// We boost the honest proposal at slot 2.
|
||||
secondsPerSlot := time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot)
|
||||
genesis := time.Now().Add(-2 * secondsPerSlot)
|
||||
require.NoError(t, f.BoostProposerRoot(ctx, honestBlockSlot, honestBlock, genesis))
|
||||
args := &forkchoicetypes.ProposerBoostRootArgs{
|
||||
BlockRoot: honestBlock,
|
||||
BlockSlot: honestBlockSlot,
|
||||
CurrentSlot: types.Slot(2),
|
||||
SecondsIntoSlot: 0,
|
||||
}
|
||||
require.NoError(t, f.BoostProposerRoot(ctx, args))

// The maliciously withheld block has one vote.
votes := []uint64{1}
@@ -255,7 +269,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
honestBlockSlot,
honestBlock,
zeroHash,
params.BeaconConfig().ZeroHash,
zeroHash,
jEpoch,
fEpoch,
),
@@ -274,7 +288,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
maliciouslyWithheldBlockSlot,
maliciouslyWithheldBlock,
zeroHash,
params.BeaconConfig().ZeroHash,
zeroHash,
jEpoch,
fEpoch,
),
@@ -286,9 +300,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
assert.Equal(t, honestBlock, r, "Incorrect head for justified epoch at slot 2")

// We boost the honest proposal at slot 2.
secondsPerSlot := time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot)
genesis := time.Now().Add(-2 * secondsPerSlot)
require.NoError(t, f.BoostProposerRoot(ctx, honestBlockSlot, honestBlock, genesis))
args := &forkchoicetypes.ProposerBoostRootArgs{
BlockRoot: honestBlock,
BlockSlot: honestBlockSlot,
CurrentSlot: types.Slot(2),
SecondsIntoSlot: 0,
}
require.NoError(t, f.BoostProposerRoot(ctx, args))

// An attestation is received for B that has more voting power than C with the proposer boost,
// allowing B to then become the head if their attestation has enough adversarial votes.
@@ -330,7 +348,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
cSlot,
c,
a, // parent
params.BeaconConfig().ZeroHash,
zeroHash,
jEpoch,
fEpoch,
),
@@ -342,9 +360,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
assert.Equal(t, c, r, "Incorrect head for justified epoch at slot 2")

// We boost C.
secondsPerSlot := time.Second * time.Duration(params.BeaconConfig().SecondsPerSlot)
genesis := time.Now().Add(-2 * secondsPerSlot)
require.NoError(t, f.BoostProposerRoot(ctx, cSlot /* slot */, c, genesis))
args := &forkchoicetypes.ProposerBoostRootArgs{
BlockRoot: c,
BlockSlot: cSlot,
CurrentSlot: types.Slot(2),
SecondsIntoSlot: 0,
}
require.NoError(t, f.BoostProposerRoot(ctx, args))

bSlot := types.Slot(1)
b := indexToHash(1)
@@ -354,7 +376,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
bSlot,
b,
a, // parent
params.BeaconConfig().ZeroHash,
zeroHash,
jEpoch,
fEpoch,
),
@@ -378,7 +400,7 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
dSlot,
d,
b, // parent
params.BeaconConfig().ZeroHash,
zeroHash,
jEpoch,
fEpoch,
),
@@ -390,8 +412,13 @@ func TestForkChoice_BoostProposerRoot_PreventsExAnteAttack(t *testing.T) {
assert.Equal(t, c, r, "Expected C to remain the head")

// Block D receives the boost.
genesis = time.Now().Add(-3 * secondsPerSlot)
require.NoError(t, f.BoostProposerRoot(ctx, dSlot /* slot */, d, genesis))
args = &forkchoicetypes.ProposerBoostRootArgs{
BlockRoot: d,
BlockSlot: dSlot,
CurrentSlot: types.Slot(3),
SecondsIntoSlot: 0,
}
require.NoError(t, f.BoostProposerRoot(ctx, args))

// Ensure D becomes the head thanks to boosting.
r, err = f.Head(ctx, jEpoch, zeroHash, balances, fEpoch)
@@ -412,12 +439,15 @@ func TestForkChoice_BoostProposerRoot(t *testing.T) {
f := &ForkChoice{
store: &Store{},
}
// Genesis set to 1 slot ago.
genesis := time.Now().Add(-time.Duration(cfg.SecondsPerSlot) * time.Second)
blockRoot := [32]byte{'A'}

// Trying to boost a block from slot 0 should not work.
err := f.BoostProposerRoot(ctx, types.Slot(0), blockRoot, genesis)
args := &forkchoicetypes.ProposerBoostRootArgs{
BlockRoot: blockRoot,
BlockSlot: types.Slot(0),
CurrentSlot: types.Slot(1),
SecondsIntoSlot: 0,
}
err := f.BoostProposerRoot(ctx, args)
require.NoError(t, err)
require.DeepEqual(t, [32]byte{}, f.store.proposerBoostRoot)
})
@@ -426,14 +456,18 @@ func TestForkChoice_BoostProposerRoot(t *testing.T) {
store: &Store{},
}
// Genesis set to 1 slot ago + X where X > attesting interval.
genesis := time.Now().Add(-time.Duration(cfg.SecondsPerSlot) * time.Second)
attestingInterval := time.Duration(cfg.SecondsPerSlot / cfg.IntervalsPerSlot)
greaterThanAttestingInterval := attestingInterval + 100*time.Millisecond
genesis = genesis.Add(-greaterThanAttestingInterval * time.Second)
blockRoot := [32]byte{'A'}
attestingInterval := time.Duration(cfg.SecondsPerSlot/cfg.IntervalsPerSlot) * time.Second
greaterThanAttestingInterval := attestingInterval + time.Second

// Trying to boost a block from slot 1 that is untimely should not work.
err := f.BoostProposerRoot(ctx, types.Slot(1), blockRoot, genesis)
blockRoot := [32]byte{'A'}
args := &forkchoicetypes.ProposerBoostRootArgs{
BlockRoot: blockRoot,
BlockSlot: types.Slot(1),
CurrentSlot: 1,
SecondsIntoSlot: uint64(greaterThanAttestingInterval.Seconds()),
}
err := f.BoostProposerRoot(ctx, args)
require.NoError(t, err)
require.DeepEqual(t, [32]byte{}, f.store.proposerBoostRoot)
})
@@ -442,11 +476,15 @@ func TestForkChoice_BoostProposerRoot(t *testing.T) {
store: &Store{},
}
// Genesis set to 1 slot ago + 0 seconds into the attesting interval.
genesis := time.Now().Add(-time.Duration(cfg.SecondsPerSlot) * time.Second)
fmt.Println(genesis)
blockRoot := [32]byte{'A'}
args := &forkchoicetypes.ProposerBoostRootArgs{
BlockRoot: blockRoot,
BlockSlot: types.Slot(1),
CurrentSlot: types.Slot(1),
SecondsIntoSlot: 0,
}

err := f.BoostProposerRoot(ctx, types.Slot(1), blockRoot, genesis)
err := f.BoostProposerRoot(ctx, args)
require.NoError(t, err)
require.DeepEqual(t, [32]byte{'A'}, f.store.proposerBoostRoot)
})
@@ -454,13 +492,16 @@ func TestForkChoice_BoostProposerRoot(t *testing.T) {
f := &ForkChoice{
store: &Store{},
}
// Genesis set to 1 slot ago + (attesting interval / 2).
genesis := time.Now().Add(-time.Duration(cfg.SecondsPerSlot) * time.Second)
blockRoot := [32]byte{'A'}
halfAttestingInterval := time.Second
genesis = genesis.Add(-halfAttestingInterval)
args := &forkchoicetypes.ProposerBoostRootArgs{
BlockRoot: blockRoot,
BlockSlot: types.Slot(1),
CurrentSlot: types.Slot(1),
SecondsIntoSlot: uint64(halfAttestingInterval.Seconds()),
}

err := f.BoostProposerRoot(ctx, types.Slot(1), blockRoot, genesis)
err := f.BoostProposerRoot(ctx, args)
require.NoError(t, err)
require.DeepEqual(t, [32]byte{'A'}, f.store.proposerBoostRoot)
})

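The boost tests above all turn on one timeliness rule: a block is only eligible for the proposer boost when it is for the current slot, is not the genesis slot, and arrives within the attesting interval (SecondsPerSlot / IntervalsPerSlot) of that slot. A minimal sketch of that condition follows, using a hypothetical helper and parameter names rather than Prysm's actual implementation:

package sketch

// boostIsTimely mirrors the behaviour the tests assert: no boost for slot 0,
// no boost for a block that is not for the current slot, and no boost once
// the attesting interval of the slot has already passed.
func boostIsTimely(secondsPerSlot, intervalsPerSlot, secondsIntoSlot uint64, blockSlot, currentSlot uint64) bool {
	if blockSlot == 0 || blockSlot != currentSlot {
		return false
	}
	attestingInterval := secondsPerSlot / intervalsPerSlot // e.g. 12 / 3 = 4 seconds on mainnet
	return secondsIntoSlot < attestingInterval
}
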
@@ -9,8 +9,10 @@ import (
types "github.com/prysmaticlabs/eth2-types"
fieldparams "github.com/prysmaticlabs/prysm/config/fieldparams"
"github.com/prysmaticlabs/prysm/config/params"
"github.com/prysmaticlabs/prysm/encoding/bytesutil"
pmath "github.com/prysmaticlabs/prysm/math"
pbrpc "github.com/prysmaticlabs/prysm/proto/prysm/v1alpha1"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
)

@@ -30,43 +32,14 @@ func New(justifiedEpoch, finalizedEpoch types.Epoch, finalizedRoot [32]byte) *Fo
proposerBoostRoot: [32]byte{},
nodes: make([]*Node, 0),
nodesIndices: make(map[[32]byte]uint64),
payloadIndices: make(map[[32]byte]uint64),
canonicalNodes: make(map[[32]byte]bool),
pruneThreshold: defaultPruneThreshold,
}

b := make([]uint64, 0)
v := make([]Vote, 0)
st := &optimisticStore{
validatedTips: make(map[[32]byte]types.Slot),
}
return &ForkChoice{store: s, balances: b, votes: v, syncedTips: st}
}

// SetSyncedTips sets the synced and validated tips from the passed map
func (f *ForkChoice) SetSyncedTips(tips map[[32]byte]types.Slot) error {
if len(tips) == 0 {
return errInvalidSyncedTips
}
newTips := make(map[[32]byte]types.Slot, len(tips))
for k, v := range tips {
newTips[k] = v
}
f.syncedTips.Lock()
defer f.syncedTips.Unlock()
f.syncedTips.validatedTips = newTips
return nil
}

// SyncedTips returns the synced and validated tips from the fork choice store.
func (f *ForkChoice) SyncedTips() map[[32]byte]types.Slot {
f.syncedTips.RLock()
defer f.syncedTips.RUnlock()

m := make(map[[32]byte]types.Slot)
for k, v := range f.syncedTips.validatedTips {
m[k] = v
}
return m
return &ForkChoice{store: s, balances: b, votes: v}
}

// Head returns the head root from fork choice store.
@@ -159,7 +132,7 @@ func (f *ForkChoice) InsertOptimisticBlock(
// Prune prunes the fork choice store with the new finalized root. The store is only pruned if the input
// root is different than the current store finalized root, and the number of the store has met prune threshold.
func (f *ForkChoice) Prune(ctx context.Context, finalizedRoot [32]byte) error {
return f.store.prune(ctx, finalizedRoot, f.syncedTips)
return f.store.prune(ctx, finalizedRoot)
}

// HasNode returns true if the node exists in fork choice store,
@@ -375,6 +348,7 @@ func (s *Store) insert(ctx context.Context,
}

s.nodesIndices[root] = index
s.payloadIndices[payloadHash] = index
s.nodes = append(s.nodes, n)

// Update parent with the best child and descendant only if it's available.
@@ -443,6 +417,7 @@ func (s *Store) applyWeightChanges(
}
iProposerScore, err := pmath.Int(proposerScore)
if err != nil {
s.proposerBoostLock.Unlock()
return err
}
nodeDelta = nodeDelta + iProposerScore
@@ -453,6 +428,16 @@ func (s *Store) applyWeightChanges(
if nodeDelta < 0 {
d := uint64(-nodeDelta)
if n.weight < d {
s.proposerBoostLock.RLock()
log.WithFields(logrus.Fields{
"nodeDelta": d,
"nodeRoot": fmt.Sprintf("%#x", bytesutil.Trunc(n.root[:])),
"nodeWeight": n.weight,
"proposerBoostRoot": fmt.Sprintf("%#x", bytesutil.Trunc(s.proposerBoostRoot[:])),
"previousProposerBoostRoot": fmt.Sprintf("%#x", bytesutil.Trunc(s.previousProposerBoostRoot[:])),
"previousProposerBoostScore": s.previousProposerBoostScore,
}).Warning("node with invalid weight, setting it to zero")
s.proposerBoostLock.RUnlock()
n.weight = 0
} else {
n.weight -= d
@@ -598,7 +583,7 @@ func (s *Store) updateBestChildAndDescendant(parentIndex, childIndex uint64) err
// prune prunes the store with the new finalized root. The tree is only
// pruned if the input finalized root are different than the one in stored and
// the number of the nodes in store has met prune threshold.
func (s *Store) prune(ctx context.Context, finalizedRoot [32]byte, syncedTips *optimisticStore) error {
func (s *Store) prune(ctx context.Context, finalizedRoot [32]byte) error {
_, span := trace.StartSpan(ctx, "protoArrayForkChoice.prune")
defer span.End()

@@ -618,18 +603,9 @@ func (s *Store) prune(ctx context.Context, finalizedRoot [32]byte, syncedTips *o
return nil
}

// Traverse through the node list starting from the finalized node at index 0.
// Nodes that are not branching off from the finalized node will be removed.
syncedTips.Lock()
defer syncedTips.Unlock()

canonicalNodesMap := make(map[uint64]uint64, uint64(len(s.nodes))-finalizedIndex)
canonicalNodes := make([]*Node, 1, uint64(len(s.nodes))-finalizedIndex)
finalizedNode := s.nodes[finalizedIndex]
finalizedTipIndex, err := s.findSyncedTip(ctx, finalizedNode, syncedTips)
if err != nil {
return err
}
finalizedNode.parent = NonExistentNode
canonicalNodes[0] = finalizedNode
canonicalNodesMap[finalizedIndex] = uint64(0)
@@ -645,10 +621,6 @@ func (s *Store) prune(ctx context.Context, finalizedRoot [32]byte, syncedTips *o
} else {
// Remove node and synced tip that is not part of finalized branch.
delete(s.nodesIndices, node.root)
_, ok := syncedTips.validatedTips[node.root]
if ok && idx != finalizedTipIndex {
delete(syncedTips.validatedTips, node.root)
}
}
}
s.nodesIndices[finalizedRoot] = uint64(0)
@@ -665,7 +637,6 @@ func (s *Store) prune(ctx context.Context, finalizedRoot [32]byte, syncedTips *o

s.nodes = canonicalNodes
prunedCount.Inc()
syncedTipsCount.Set(float64(len(syncedTips.validatedTips)))
return nil
}

@@ -673,6 +644,10 @@ func (s *Store) prune(ctx context.Context, finalizedRoot [32]byte, syncedTips *o
// Any node with diff finalized or justified epoch than the ones in fork choice store
// should not be viable to head.
func (s *Store) leadsToViableHead(node *Node) (bool, error) {
if node.status == invalid {
return false, nil
}

var bestDescendantViable bool
bestDescendantIndex := node.bestDescendant

@@ -704,20 +679,6 @@ func (s *Store) viableForHead(node *Node) bool {
return justified && finalized
}

// Returns the list of leaves in the Fork Choice store.
// These are all the nodes that have NonExistentNode as best child.
// This internal method assumes that the caller holds a lock in s.nodesLock.
func (s *Store) leaves() ([]uint64, error) {
var leaves []uint64
for i := uint64(0); i < uint64(len(s.nodes)); i++ {
node := s.nodes[i]
if node.bestChild == NonExistentNode {
leaves = append(leaves, i)
}
}
return leaves, nil
}

// Tips returns all possible chain heads (leaves of fork choice tree).
// Heads roots and heads slots are returned.
func (f *ForkChoice) Tips() ([][32]byte, []types.Slot) {

@@ -100,7 +100,7 @@ func TestStore_Head_ContextCancelled(t *testing.T) {

func TestStore_Insert_UnknownParent(t *testing.T) {
// The new node does not have a parent.
s := &Store{nodesIndices: make(map[[32]byte]uint64)}
s := &Store{nodesIndices: make(map[[32]byte]uint64), payloadIndices: make(map[[32]byte]uint64)}
require.NoError(t, s.insert(context.Background(), 100, [32]byte{'A'}, [32]byte{'B'}, params.BeaconConfig().ZeroHash, 1, 1))
assert.Equal(t, 1, len(s.nodes), "Did not insert block")
assert.Equal(t, 1, len(s.nodesIndices), "Did not insert block")
@@ -113,7 +113,7 @@ func TestStore_Insert_UnknownParent(t *testing.T) {
func TestStore_Insert_KnownParent(t *testing.T) {
// Similar to UnknownParent test, but this time the new node has a valid parent already in store.
// The new node builds on top of the parent.
s := &Store{nodesIndices: make(map[[32]byte]uint64)}
s := &Store{nodesIndices: make(map[[32]byte]uint64), payloadIndices: make(map[[32]byte]uint64)}
s.nodes = []*Node{{}}
p := [32]byte{'B'}
s.nodesIndices[p] = 0
@@ -336,11 +336,10 @@ func TestStore_Prune_LessThanThreshold(t *testing.T) {
})

s := &Store{nodes: nodes, nodesIndices: indices, pruneThreshold: 100}
syncedTips := &optimisticStore{}

// Finalized root is at index 99 so everything before 99 should be pruned,
// but PruneThreshold is at 100 so nothing will be pruned.
require.NoError(t, s.prune(context.Background(), indexToHash(99), syncedTips))
require.NoError(t, s.prune(context.Background(), indexToHash(99)))
assert.Equal(t, 100, len(s.nodes), "Incorrect nodes count")
assert.Equal(t, 100, len(s.nodesIndices), "Incorrect node indices count")
}
@@ -377,10 +376,9 @@ func TestStore_Prune_MoreThanThreshold(t *testing.T) {
})
indices[indexToHash(uint64(numOfNodes-1))] = uint64(numOfNodes - 1)
s := &Store{nodes: nodes, nodesIndices: indices}
syncedTips := &optimisticStore{}

// Finalized root is at index 99 so everything before 99 should be pruned.
require.NoError(t, s.prune(context.Background(), indexToHash(99), syncedTips))
require.NoError(t, s.prune(context.Background(), indexToHash(99)))
assert.Equal(t, 1, len(s.nodes), "Incorrect nodes count")
assert.Equal(t, 1, len(s.nodesIndices), "Incorrect node indices count")
}
@@ -416,15 +414,14 @@ func TestStore_Prune_MoreThanOnce(t *testing.T) {
})

s := &Store{nodes: nodes, nodesIndices: indices}
syncedTips := &optimisticStore{}

// Finalized root is at index 11 so everything before 11 should be pruned.
require.NoError(t, s.prune(context.Background(), indexToHash(10), syncedTips))
require.NoError(t, s.prune(context.Background(), indexToHash(10)))
assert.Equal(t, 90, len(s.nodes), "Incorrect nodes count")
assert.Equal(t, 90, len(s.nodesIndices), "Incorrect node indices count")

// One more time.
require.NoError(t, s.prune(context.Background(), indexToHash(20), syncedTips))
require.NoError(t, s.prune(context.Background(), indexToHash(20)))
assert.Equal(t, 80, len(s.nodes), "Incorrect nodes count")
assert.Equal(t, 80, len(s.nodesIndices), "Incorrect node indices count")
}
@@ -460,7 +457,6 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
bestDescendant: NonExistentNode,
},
}
syncedTips := &optimisticStore{}
s := &Store{
pruneThreshold: 0,
nodes: nodes,
@@ -470,7 +466,7 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
indexToHash(uint64(2)): 2,
},
}
require.NoError(t, s.prune(context.Background(), indexToHash(uint64(1)), syncedTips))
require.NoError(t, s.prune(context.Background(), indexToHash(uint64(1))))
require.Equal(t, len(s.nodes), 1)
}

@@ -486,9 +482,6 @@ func TestStore_Prune_NoDanglingBranch(t *testing.T) {
// J -- K -- L
//
//
// Synced tips are B, D and E. And we finalize F. All that is left in fork
// choice is F, and the only synced tip left is E which is now away from Fork
// Choice.
func TestStore_PruneSyncedTips(t *testing.T) {
ctx := context.Background()
f := setup(1, 1)
@@ -505,19 +498,9 @@ func TestStore_PruneSyncedTips(t *testing.T) {
require.NoError(t, f.InsertOptimisticBlock(ctx, 105, [32]byte{'k'}, [32]byte{'g'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'i'}, [32]byte{'h'}, params.BeaconConfig().ZeroHash, 1, 1))
require.NoError(t, f.InsertOptimisticBlock(ctx, 106, [32]byte{'l'}, [32]byte{'k'}, params.BeaconConfig().ZeroHash, 1, 1))
syncedTips := &optimisticStore{
validatedTips: map[[32]byte]types.Slot{
[32]byte{'b'}: 101,
[32]byte{'d'}: 103,
[32]byte{'e'}: 104,
},
}
f.syncedTips = syncedTips
f.store.pruneThreshold = 0
require.NoError(t, f.Prune(ctx, [32]byte{'f'}))
require.Equal(t, 1, len(f.syncedTips.validatedTips))
_, ok := f.syncedTips.validatedTips[[32]byte{'e'}]
require.Equal(t, true, ok)
require.Equal(t, 1, f.NodeCount())
}

func TestStore_LeadsToViableHead(t *testing.T) {
@@ -546,20 +529,6 @@ func TestStore_LeadsToViableHead(t *testing.T) {
}
}

func TestStore_SetSyncedTips(t *testing.T) {
f := setup(1, 1)
tips := make(map[[32]byte]types.Slot)
require.ErrorIs(t, errInvalidSyncedTips, f.SetSyncedTips(tips))
tips[bytesutil.ToBytes32([]byte{'a'})] = 1
require.NoError(t, f.SetSyncedTips(tips))
f.syncedTips.RLock()
defer f.syncedTips.RUnlock()
require.Equal(t, 1, len(f.syncedTips.validatedTips))
slot, ok := f.syncedTips.validatedTips[bytesutil.ToBytes32([]byte{'a'})]
require.Equal(t, true, ok)
require.Equal(t, types.Slot(1), slot)
}

func TestStore_ViableForHead(t *testing.T) {
tests := []struct {
n *Node

@@ -9,11 +9,10 @@ import (

// ForkChoice defines the overall fork choice store which includes all block nodes, validator's latest votes and balances.
type ForkChoice struct {
store *Store
votes []Vote // tracks individual validator's last vote.
votesLock sync.RWMutex
balances []uint64 // tracks individual validator's last justified balances.
syncedTips *optimisticStore
store *Store
votes []Vote // tracks individual validator's last vote.
votesLock sync.RWMutex
balances []uint64 // tracks individual validator's last justified balances.
}

// Store defines the fork choice store which includes block nodes and the last view of checkpoint information.
@@ -28,6 +27,7 @@ type Store struct {
nodes []*Node // list of block nodes, each node is a representation of one block.
nodesIndices map[[fieldparams.RootLength]byte]uint64 // the root of block node and the nodes index in the list.
canonicalNodes map[[fieldparams.RootLength]byte]bool // the canonical block nodes.
payloadIndices map[[fieldparams.RootLength]byte]uint64 // the payload hash of block node and the index in the list
nodesLock sync.RWMutex
proposerBoostLock sync.RWMutex
}
@@ -44,15 +44,17 @@ type Node struct {
weight uint64 // weight of this node.
bestChild uint64 // bestChild index of this node.
bestDescendant uint64 // bestDescendant of this node.
graffiti [fieldparams.RootLength]byte // graffiti of the block node.
status status // optimistic status of this node
}

// optimisticStore defines a structure that tracks the tips of the fully
// validated blocks tree.
type optimisticStore struct {
validatedTips map[[32]byte]types.Slot
sync.RWMutex
}
// enum used as optimistic status of a node
type status uint8

const (
syncing status = iota // the node is optimistic
valid //fully validated node
invalid // invalid execution payload
)

// Vote defines an individual validator's vote.
type Vote struct {

@@ -12,7 +12,6 @@ import (
func TestVotes_CanFindHead(t *testing.T) {
balances := []uint64{1, 1}
f := setup(1, 1)
syncedTips := &optimisticStore{}

// The head should always start at the finalized block.
r, err := f.Head(context.Background(), 1, params.BeaconConfig().ZeroHash, balances, 1)
@@ -249,7 +248,7 @@ func TestVotes_CanFindHead(t *testing.T) {

// Verify pruning below the prune threshold does not affect head.
f.store.pruneThreshold = 1000
require.NoError(t, f.store.prune(context.Background(), indexToHash(5), syncedTips))
require.NoError(t, f.store.prune(context.Background(), indexToHash(5)))
assert.Equal(t, 11, len(f.store.nodes), "Incorrect nodes length after prune")

r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 2)
@@ -273,7 +272,7 @@ func TestVotes_CanFindHead(t *testing.T) {
// / \
// 9 10
f.store.pruneThreshold = 1
require.NoError(t, f.store.prune(context.Background(), indexToHash(5), syncedTips))
require.NoError(t, f.store.prune(context.Background(), indexToHash(5)))
assert.Equal(t, 5, len(f.store.nodes), "Incorrect nodes length after prune")

r, err = f.Head(context.Background(), 2, indexToHash(5), balances, 2)

beacon-chain/forkchoice/types/BUILD.bazel (new file, 9 lines)
@@ -0,0 +1,9 @@
load("@prysm//tools/go:def.bzl", "go_library")

go_library(
name = "go_default_library",
srcs = ["types.go"],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/forkchoice/types",
visibility = ["//visibility:public"],
deps = ["@com_github_prysmaticlabs_eth2_types//:go_default_library"],
)
beacon-chain/forkchoice/types/types.go (new file, 13 lines)
@@ -0,0 +1,13 @@
package types

import (
types "github.com/prysmaticlabs/eth2-types"
)

// ProposerBoostRootArgs to call the BoostProposerRoot function.
type ProposerBoostRootArgs struct {
BlockRoot [32]byte
BlockSlot types.Slot
CurrentSlot types.Slot
SecondsIntoSlot uint64
}
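The new beacon-chain/forkchoice/types package above bundles the boost parameters into a single struct, and the test changes earlier in this diff show the matching call shape: callers now fill in a ProposerBoostRootArgs value instead of passing a slot, root, and genesis time positionally. A short usage sketch, assuming f is a protoarray ForkChoice, ctx a context.Context, and blockRoot a [32]byte as in the tests above (field values are illustrative and the comments describe the apparent intent of each field):

args := &forkchoicetypes.ProposerBoostRootArgs{
	BlockRoot:       blockRoot,     // root of the block being considered for the boost
	BlockSlot:       types.Slot(2), // slot the block was proposed for
	CurrentSlot:     types.Slot(2), // slot of the node's clock when the block arrived
	SecondsIntoSlot: 0,             // seconds already elapsed in CurrentSlot
}
if err := f.BoostProposerRoot(ctx, args); err != nil {
	// handle the error
}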
@@ -18,6 +18,7 @@ go_library(
"//api/gateway:go_default_library",
"//async/event:go_default_library",
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/cache/depositcache:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/kv:go_default_library",
@@ -41,6 +42,9 @@ go_library(
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/sync:go_default_library",
"//beacon-chain/sync/backfill:go_default_library",
"//beacon-chain/sync/checkpoint:go_default_library",
"//beacon-chain/sync/genesis:go_default_library",
"//beacon-chain/sync/initial-sync:go_default_library",
"//cmd:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
@@ -74,6 +78,8 @@ go_test(
embed = [":go_default_library"],
deps = [
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/powchain:go_default_library",
"//beacon-chain/powchain/testing:go_default_library",
"//cmd:go_default_library",
"//cmd/beacon-chain/flags:go_default_library",
"//config/params:go_default_library",

@@ -21,6 +21,7 @@ import (
apigateway "github.com/prysmaticlabs/prysm/api/gateway"
"github.com/prysmaticlabs/prysm/async/event"
"github.com/prysmaticlabs/prysm/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
"github.com/prysmaticlabs/prysm/beacon-chain/db/kv"
@@ -44,6 +45,9 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/beacon-chain/state/stategen"
regularsync "github.com/prysmaticlabs/prysm/beacon-chain/sync"
"github.com/prysmaticlabs/prysm/beacon-chain/sync/backfill"
"github.com/prysmaticlabs/prysm/beacon-chain/sync/checkpoint"
"github.com/prysmaticlabs/prysm/beacon-chain/sync/genesis"
initialsync "github.com/prysmaticlabs/prysm/beacon-chain/sync/initial-sync"
"github.com/prysmaticlabs/prysm/cmd"
"github.com/prysmaticlabs/prysm/cmd/beacon-chain/flags"
@@ -91,6 +95,7 @@ type BeaconNode struct {
slashingsPool slashings.PoolManager
syncCommitteePool synccommittee.Pool
depositCache *depositcache.DepositCache
proposerIdsCache *cache.ProposerPayloadIDsCache
stateFeed *event.Feed
blockFeed *event.Feed
opFeed *event.Feed
@@ -101,6 +106,9 @@ type BeaconNode struct {
slasherAttestationsFeed *event.Feed
finalizedStateAtStartUp state.BeaconState
serviceFlagOpts *serviceFlagOpts
blockchainFlagOpts []blockchain.Option
GenesisInitializer genesis.Initializer
CheckpointInitializer checkpoint.Initializer
}

// New creates a new node instance, sets up configuration options, and registers
@@ -146,6 +154,7 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
slasherBlockHeadersFeed: new(event.Feed),
slasherAttestationsFeed: new(event.Feed),
serviceFlagOpts: &serviceFlagOpts{},
proposerIdsCache: cache.NewProposerPayloadIDsCache(),
}

for _, opt := range opts {
@@ -162,13 +171,19 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {
if err := beacon.startDB(cliCtx, depositAddress); err != nil {
return nil, err
}

log.Debugln("Starting Slashing DB")
if err := beacon.startSlasherDB(cliCtx); err != nil {
return nil, err
}

bfs := backfill.NewStatus(beacon.db)
if err := bfs.Reload(ctx); err != nil {
return nil, errors.Wrap(err, "backfill status initialization error")
}

log.Debugln("Starting State Gen")
if err := beacon.startStateGen(); err != nil {
if err := beacon.startStateGen(ctx, bfs); err != nil {
return nil, err
}

@@ -239,7 +254,7 @@ func New(cliCtx *cli.Context, opts ...Option) (*BeaconNode, error) {

// db.DatabasePath is the path to the containing directory
// db.NewDBFilename expands that to the canonical full path using
// the same constuction as NewDB()
// the same construction as NewDB()
c, err := newBeaconNodePromCollector(db.NewDBFilename(beacon.db.DatabasePath()))
if err != nil {
return nil, err
@@ -371,20 +386,10 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
if err != nil {
return errors.Wrap(err, "could not create deposit cache")
}

b.depositCache = depositCache

if cliCtx.IsSet(flags.GenesisStatePath.Name) {
r, err := os.Open(cliCtx.String(flags.GenesisStatePath.Name))
if err != nil {
return err
}
defer func() {
if err := r.Close(); err != nil {
log.WithError(err).Error("Failed to close genesis file")
}
}()
if err := b.db.LoadGenesis(b.ctx, r); err != nil {
if b.GenesisInitializer != nil {
if err := b.GenesisInitializer.Initialize(b.ctx, d); err != nil {
if err == db.ErrExistingGenesisState {
return errors.New("Genesis state flag specified but a genesis state " +
"exists already. Run again with --clear-db and/or ensure you are using the " +
@@ -393,9 +398,17 @@ func (b *BeaconNode) startDB(cliCtx *cli.Context, depositAddress string) error {
return errors.Wrap(err, "could not load genesis from file")
}
}

if err := b.db.EnsureEmbeddedGenesis(b.ctx); err != nil {
return err
}

if b.CheckpointInitializer != nil {
if err := b.CheckpointInitializer.Initialize(b.ctx, d); err != nil {
return err
}
}

knownContract, err := b.db.DepositContractAddress(b.ctx)
if err != nil {
return err
@@ -463,10 +476,11 @@ func (b *BeaconNode) startSlasherDB(cliCtx *cli.Context) error {
return nil
}

func (b *BeaconNode) startStateGen() error {
b.stateGen = stategen.New(b.db)
func (b *BeaconNode) startStateGen(ctx context.Context, bfs *backfill.Status) error {
opts := []stategen.StateGenOption{stategen.WithBackfillStatus(bfs)}
sg := stategen.New(b.db, opts...)

cp, err := b.db.FinalizedCheckpoint(b.ctx)
cp, err := b.db.FinalizedCheckpoint(ctx)
if err != nil {
return err
}
@@ -474,7 +488,7 @@ func (b *BeaconNode) startStateGen() error {
r := bytesutil.ToBytes32(cp.Root)
// Consider edge case where finalized root are zeros instead of genesis root hash.
if r == params.BeaconConfig().ZeroHash {
genesisBlock, err := b.db.GenesisBlock(b.ctx)
genesisBlock, err := b.db.GenesisBlock(ctx)
if err != nil {
return err
}
@@ -486,10 +500,12 @@ func (b *BeaconNode) startStateGen() error {
}
}

b.finalizedStateAtStartUp, err = b.stateGen.StateByRoot(b.ctx, r)
b.finalizedStateAtStartUp, err = sg.StateByRoot(ctx, r)
if err != nil {
return err
}

b.stateGen = sg
return nil
}

@@ -561,7 +577,7 @@ func (b *BeaconNode) registerBlockchainService() error {
blockchain.WithDatabase(b.db),
blockchain.WithDepositCache(b.depositCache),
blockchain.WithChainStartFetcher(web3Service),
blockchain.WithExecutionEngineCaller(web3Service.EngineAPIClient()),
blockchain.WithExecutionEngineCaller(web3Service),
blockchain.WithAttestationPool(b.attestationPool),
blockchain.WithExitPool(b.exitPool),
blockchain.WithSlashingPool(b.slashingsPool),
@@ -572,6 +588,7 @@ func (b *BeaconNode) registerBlockchainService() error {
blockchain.WithStateGen(b.stateGen),
blockchain.WithSlasherAttestationsFeed(b.slasherAttestationsFeed),
blockchain.WithFinalizedStateAtStartUp(b.finalizedStateAtStartUp),
blockchain.WithProposerIdsCache(b.proposerIdsCache),
)
blockchainService, err := blockchain.NewService(b.ctx, opts...)
if err != nil {
@@ -788,7 +805,8 @@ func (b *BeaconNode) registerRPCService() error {
StateGen: b.stateGen,
EnableDebugRPCEndpoints: enableDebugRPCEndpoints,
MaxMsgSize: maxMsgSize,
ExecutionEngineCaller: web3Service.EngineAPIClient(),
ProposerIdsCache: b.proposerIdsCache,
ExecutionEngineCaller: web3Service,
})

return b.services.RegisterService(rpcService)

@@ -8,6 +8,8 @@ import (
"testing"

statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/powchain"
mockPOW "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing"
"github.com/prysmaticlabs/prysm/cmd"
"github.com/prysmaticlabs/prysm/testing/require"
logTest "github.com/sirupsen/logrus/hooks/test"
@@ -45,6 +47,11 @@ func TestNodeClose_OK(t *testing.T) {
// TestClearDB tests clearing the database
func TestClearDB(t *testing.T) {
hook := logTest.NewGlobal()
srv, endpoint, err := mockPOW.SetupRPCServer()
require.NoError(t, err)
t.Cleanup(func() {
srv.Stop()
})

tmp := filepath.Join(t.TempDir(), "datadirtest")

@@ -54,7 +61,9 @@ func TestClearDB(t *testing.T) {
set.Bool(cmd.ForceClearDB.Name, true, "force clear db")

context := cli.NewContext(&app, set, nil)
_, err := New(context)
_, err = New(context, WithPowchainFlagOptions([]powchain.Option{
powchain.WithHttpEndpoints([]string{endpoint}),
}))
require.NoError(t, err)

require.LogsContain(t, hook, "Removing database")

@@ -3,12 +3,16 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"auth.go",
"block_cache.go",
"block_reader.go",
"check_transition_config.go",
"deposit.go",
"engine_client.go",
"errors.go",
"log.go",
"log_processing.go",
"metrics.go",
"options.go",
"prometheus.go",
"provider.go",
@@ -28,7 +32,6 @@ go_library(
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/powchain/engine-api-client/v1:go_default_library",
"//beacon-chain/powchain/types:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native/v1:go_default_library",
@@ -56,6 +59,7 @@ go_library(
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
"@com_github_ethereum_go_ethereum//ethclient:go_default_library",
"@com_github_ethereum_go_ethereum//rpc:go_default_library",
"@com_github_golang_jwt_jwt_v4//:go_default_library",
"@com_github_holiman_uint256//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
@@ -70,10 +74,12 @@ go_test(
name = "go_default_test",
size = "medium",
srcs = [
"auth_test.go",
"block_cache_test.go",
"block_reader_test.go",
"check_transition_config_test.go",
"deposit_test.go",
"engine_client_test.go",
"init_test.go",
"log_processing_test.go",
"powchain_test.go",
@@ -84,6 +90,7 @@ go_test(
embed = [":go_default_library"],
deps = [
"//async/event:go_default_library",
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/cache/depositcache:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
@@ -91,8 +98,6 @@ go_test(
"//beacon-chain/core/signing:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/powchain/engine-api-client/v1:go_default_library",
"//beacon-chain/powchain/engine-api-client/v1/mocks:go_default_library",
"//beacon-chain/powchain/testing:go_default_library",
"//beacon-chain/powchain/types:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
@@ -106,7 +111,9 @@ go_test(
"//monitoring/clientstats:go_default_library",
"//network:go_default_library",
"//network/authorization:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//proto/prysm/v1alpha1/wrapper:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
@@ -116,10 +123,15 @@ go_test(
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_ethereum_go_ethereum//core/types:go_default_library",
"@com_github_ethereum_go_ethereum//rpc:go_default_library",
"@com_github_ethereum_go_ethereum//trie:go_default_library",
"@com_github_golang_jwt_jwt_v4//:go_default_library",
"@com_github_holiman_uint256//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prysmaticlabs_eth2_types//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)

@@ -1,4 +1,4 @@
package v1
package powchain

import (
"net/http"
@@ -1,4 +1,4 @@
package v1
package powchain

import (
"net/http"
@@ -19,7 +19,7 @@ func TestJWTAuthTransport(t *testing.T) {
jwtSecret: secret,
}
client := &http.Client{
Timeout: DefaultTimeout,
Timeout: DefaultRPCHTTPTimeout,
Transport: authTransport,
}
srv := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
Some files were not shown because too many files have changed in this diff.