Mirror of https://github.com/OffchainLabs/prysm.git (synced 2026-01-10 05:47:59 -05:00)

Compare commits: fulu-genes...improve-ev (40 commits)

Commits in this range (SHA1):
c690412c61, 595728b0bb, f379b37d25, f4fee5e26d, b24fe0d23a, cbe50269de, 4ed2953fcf, 915837d059,
26b276660f, 580509f2f4, be144da099, cc2565a422, d86353ea9d, 45d6002411, 08c855fd4b, 7c86b5d737,
023287f7df, 1432867c92, 18efd620dc, 6139d58fa5, 0ea5e2cf9d, 29fe707143, d68196822b, 924fe4de98,
fe9dd255c7, 83d75bcb78, ba5f7361ad, 8fa956036a, 58ce1c25f5, 98532a2df3, 08be6fde92, 4585cdc932,
aa47435c91, 80eba4e6dd, 606294e17f, 977e923692, 013b6b1d60, 66ff6f70b8, 5b1a9fb077, 35bc9b1a0f
.bazelrc (1 change)

@@ -34,6 +34,7 @@ build:minimal --@io_bazel_rules_go//go/config:tags=minimal
build:release --compilation_mode=opt
build:release --stamp
build:release --define pgo_enabled=1
build:release --strip=always

# Build binary with cgo symbolizer for debugging / profiling.
build:cgo_symbolizer --copt=-g
.github/actions/gomodtidy/Dockerfile (2 changes, vendored)

@@ -1,4 +1,4 @@
FROM golang:1.24-alpine
FROM golang:1.25.1-alpine

COPY entrypoint.sh /entrypoint.sh
.github/workflows/changelog.yml (2 changes, vendored)

@@ -9,7 +9,7 @@ on:

jobs:
run-changelog-check:
runs-on: ubuntu-latest
runs-on: ubuntu-4
steps:
- name: Checkout source code
uses: actions/checkout@f43a0e5ff2bd294095638e18286ca9a3d1956744 # v3.6.0
.github/workflows/check-specrefs.yml (2 changes, vendored)

@@ -3,7 +3,7 @@ on: [push, pull_request]

jobs:
check-specrefs:
runs-on: ubuntu-latest
runs-on: ubuntu-4

steps:
- name: Checkout repository
.github/workflows/clang-format.yml (2 changes, vendored)

@@ -10,7 +10,7 @@ on:

jobs:
clang-format-checking:
runs-on: ubuntu-latest
runs-on: ubuntu-4
steps:
- uses: actions/checkout@v2
# Is this step failing for you?
.github/workflows/fuzz.yml (8 changes, vendored)

@@ -10,13 +10,13 @@ permissions:

jobs:
list:
runs-on: ubuntu-latest
runs-on: ubuntu-4
timeout-minutes: 180
steps:
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version: '1.23.5'
go-version: '1.25.1'
- id: list
uses: shogo82148/actions-go-fuzz/list@v0
with:

@@ -25,7 +25,7 @@ jobs:
fuzz-tests: ${{steps.list.outputs.fuzz-tests}}

fuzz:
runs-on: ubuntu-latest
runs-on: ubuntu-4
timeout-minutes: 360
needs: list
strategy:

@@ -36,7 +36,7 @@ jobs:
- uses: actions/checkout@v3
- uses: actions/setup-go@v4
with:
go-version: '1.23.5'
go-version: '1.25.1'
- uses: shogo82148/actions-go-fuzz/run@v0
with:
packages: ${{ matrix.package }}
.github/workflows/go.yml (30 changes, vendored)

@@ -11,7 +11,7 @@ on:
jobs:
formatting:
name: Formatting
runs-on: ubuntu-latest
runs-on: ubuntu-4
steps:
- name: Checkout
uses: actions/checkout@v4

@@ -22,7 +22,7 @@ jobs:

gosec:
name: Gosec scan
runs-on: ubuntu-latest
runs-on: ubuntu-4
env:
GO111MODULE: on
steps:

@@ -31,7 +31,7 @@ jobs:
- name: Set up Go 1.24
uses: actions/setup-go@v4
with:
go-version: '1.24.0'
go-version: '1.25.1'
- name: Run Gosec Security Scanner
run: | # https://github.com/securego/gosec/issues/469
export PATH=$PATH:$(go env GOPATH)/bin

@@ -40,31 +40,31 @@ jobs:

lint:
name: Lint
runs-on: ubuntu-latest
runs-on: ubuntu-4
steps:
- name: Checkout
uses: actions/checkout@v4

- name: Set up Go 1.24
uses: actions/setup-go@v4
with:
go-version: '1.24.0'
id: go
fetch-depth: 0

- name: Set up Go 1.25.1
uses: actions/setup-go@v5
with:
go-version: '1.25.1'

- name: Golangci-lint
uses: golangci/golangci-lint-action@v5
uses: golangci/golangci-lint-action@v8
with:
version: v1.64.5
args: --config=.golangci.yml --out-${NO_FUTURE}format colored-line-number
version: v2.4

build:
name: Build
runs-on: ubuntu-latest
runs-on: ubuntu-4
steps:
- name: Set up Go 1.x
- name: Set up Go 1.25.1
uses: actions/setup-go@v4
with:
go-version: '1.24.0'
go-version: '1.25.1'
id: go

- name: Check out code into the Go module directory
.github/workflows/horusec.yaml (4 changes, vendored)

@@ -8,7 +8,7 @@ on:
jobs:
Horusec_Scan:
name: horusec-Scan
runs-on: ubuntu-latest
runs-on: ubuntu-4
if: github.ref == 'refs/heads/develop'
steps:
- name: Check out code

@@ -19,4 +19,4 @@ jobs:
- name: Running Security Scan
run: |
curl -fsSL https://raw.githubusercontent.com/ZupIT/horusec/main/deployments/scripts/install.sh | bash -s latest
horusec start -t="10000" -p="./" -e="true" -i="**/crypto/bls/herumi/**, **/**/*_test.go, **/third_party/afl/**, **/crypto/keystore/key.go"
horusec start -t="10000" -p="./" -e="true" -i="**/crypto/bls/herumi/**, **/**/*_test.go, **/third_party/afl/**, **/crypto/keystore/key.go"
.golangci.yml (121 changes)

@@ -1,90 +1,41 @@
version: "2"
run:
timeout: 10m
go: '1.23.5'

issues:
exclude-files:
- validator/web/site_data.go
- .*_test.go
exclude-dirs:
- proto
- tools/analyzers

go: 1.23.5
linters:
enable-all: true
disable:
# Deprecated linters:
enable:
- errcheck
- ineffassign
- govet

# Disabled for now:
- asasalint
- bodyclose
- containedctx
- contextcheck
- cyclop
- depguard
- dogsled
- dupl
- durationcheck
- errname
- err113
- exhaustive
- exhaustruct
- forbidigo
- forcetypeassert
- funlen
- gci
- gochecknoglobals
- gochecknoinits
- goconst
- gocritic
- gocyclo
- godot
- godox
- gofumpt
- gomoddirectives
- gosec
- inamedparam
- interfacebloat
- intrange
- ireturn
- lll
- maintidx
- makezero
- mnd
- musttag
- nakedret
- nestif
- nilnil
- nlreturn
- noctx
- nolintlint
- nonamedreturns
- nosprintfhostport
- perfsprint
- prealloc
- predeclared
- promlinter
- protogetter
- recvcheck
- revive
- spancheck
disable:
- staticcheck
- stylecheck
- tagalign
- tagliatelle
- thelper
- unparam
- usetesting
- varnamelen
- wrapcheck
- wsl
- unused
exclusions:
generated: lax
presets:
- comments
- common-false-positives
- legacy
- std-error-handling
paths:
- validator/web/site_data.go
- .*_test.go
- proto
- tools/analyzers
- third_party$
- builtin$
- examples$

linters-settings:
gocognit:
# TODO: We should target for < 50
min-complexity: 65

output:
print-issued-lines: true
sort-results: true
formatters:
enable:
- gofmt
- goimports
exclusions:
generated: lax
paths:
- validator/web/site_data.go
- .*_test.go
- proto
- tools/analyzers
- third_party$
- builtin$
- examples$
WORKSPACE (58 changes)

@@ -158,15 +158,15 @@ oci_register_toolchains(

http_archive(
name = "io_bazel_rules_go",
integrity = "sha256-JD8o94crTb2DFiJJR8nMAGdBAW95zIENB4cbI+JnrI4=",
patch_args = ["-p1"],
patches = [
# Expose internals of go_test for custom build transitions.
"//third_party:io_bazel_rules_go_test.patch",
],
strip_prefix = "rules_go-cf3c3af34bd869b864f5f2b98e2f41c2b220d6c9",
sha256 = "a729c8ed2447c90fe140077689079ca0acfb7580ec41637f312d650ce9d93d96",
urls = [
"https://github.com/bazel-contrib/rules_go/archive/cf3c3af34bd869b864f5f2b98e2f41c2b220d6c9.tar.gz",
"https://mirror.bazel.build/github.com/bazel-contrib/rules_go/releases/download/v0.57.0/rules_go-v0.57.0.zip",
"https://github.com/bazel-contrib/rules_go/releases/download/v0.57.0/rules_go-v0.57.0.zip",
],
)

@@ -208,7 +208,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
go_rules_dependencies()

go_register_toolchains(
go_version = "1.24.6",
go_version = "1.25.1",
nogo = "@//:nogo",
)

@@ -253,16 +253,16 @@ filegroup(
url = "https://github.com/ethereum/EIPs/archive/5480440fe51742ed23342b68cf106cefd427e39d.tar.gz",
)

consensus_spec_version = "v1.6.0-alpha.6"
consensus_spec_version = "v1.6.0-beta.0"

load("@prysm//tools:download_spectests.bzl", "consensus_spec_tests")

consensus_spec_tests(
name = "consensus_spec_tests",
flavors = {
"general": "sha256-7wkWuahuCO37uVYnxq8Badvi+jY907pBj68ixL8XDOI=",
"minimal": "sha256-Qy/f27N0LffS/ej7VhIubwDejD6LMK0VdenKkqtZVt4=",
"mainnet": "sha256-3H7mu5yE+FGz2Wr/nc8Nd9aEu93YoEpsYtn0zBSoeDE=",
"general": "sha256-rT3jQp2+ZaDiO66gIQggetzqr+kGeexaLqEhbx4HDMY=",
"minimal": "sha256-wowwwyvd0KJLsE+oDOtPkrhZyJndJpJ0lbXYsLH6XBw=",
"mainnet": "sha256-4ZLrLNeO7NihZ4TuWH5V5fUhvW9Y3mAPBQDCqrfShps=",
},
version = consensus_spec_version,
)

@@ -278,7 +278,7 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-uvz3XfMTGfy3/BtQQoEp5XQOgrWgcH/5Zo/gR0iiP+k=",
integrity = "sha256-sBe3Rx8zGq9IrvfgIhZQpYidGjy3mE1SiCb6/+pjLdY=",
strip_prefix = "consensus-specs-" + consensus_spec_version[1:],
url = "https://github.com/ethereum/consensus-specs/archive/refs/tags/%s.tar.gz" % consensus_spec_version,
)

@@ -300,22 +300,6 @@ filegroup(
url = "https://github.com/ethereum/bls12-381-tests/releases/download/%s/bls_tests_yaml.tar.gz" % bls_test_version,
)

http_archive(
name = "eth2_networks",
build_file_content = """
filegroup(
name = "configs",
srcs = glob([
"shared/**/config.yaml",
]),
visibility = ["//visibility:public"],
)
""",
sha256 = "77e7e3ed65e33b7bb19d30131f4c2bb39e4dfeb188ab9ae84651c3cc7600131d",
strip_prefix = "eth2-networks-934c948e69205dcf2deb87e4ae6cc140c335f94d",
url = "https://github.com/eth-clients/eth2-networks/archive/934c948e69205dcf2deb87e4ae6cc140c335f94d.tar.gz",
)

http_archive(
name = "holesky_testnet",
build_file_content = """

@@ -327,9 +311,9 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-YVFFrCmjoGZ3fXMWpsCpSsYbANy1grnqYwOLKIg2SsA=",
strip_prefix = "holesky-32a72e21c6e53c262f27d50dd540cb654517d03a",
url = "https://github.com/eth-clients/holesky/archive/32a72e21c6e53c262f27d50dd540cb654517d03a.tar.gz", # 2025-03-17
integrity = "sha256-htyxg8Ln2o8eCiifFN7/hcHGZg8Ir9CPzCEx+FUnnCs=",
strip_prefix = "holesky-8aec65f11f0c986d6b76b2eb902420635eb9b815",
url = "https://github.com/eth-clients/holesky/archive/8aec65f11f0c986d6b76b2eb902420635eb9b815.tar.gz",
)

http_archive(

@@ -359,9 +343,9 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-b5F7Wg9LLMqGRIpP2uqb/YsSFVn2ynzlV7g/Nb1EFLk=",
strip_prefix = "sepolia-562d9938f08675e9ba490a1dfba21fb05843f39f",
url = "https://github.com/eth-clients/sepolia/archive/562d9938f08675e9ba490a1dfba21fb05843f39f.tar.gz", # 2025-03-17
integrity = "sha256-+UZgfvBcea0K0sbvAJZOz5ZNmxdWZYbohP38heUuc6w=",
strip_prefix = "sepolia-f9158732adb1a2a6440613ad2232eb50e7384c4f",
url = "https://github.com/eth-clients/sepolia/archive/f9158732adb1a2a6440613ad2232eb50e7384c4f.tar.gz",
)

http_archive(

@@ -375,17 +359,17 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
integrity = "sha256-dPiEWUd8QvbYGwGtIm0QtCekitVLOLsW5rpQIGzz8PU=",
strip_prefix = "hoodi-828c2c940e1141092bd4bb979cef547ea926d272",
url = "https://github.com/eth-clients/hoodi/archive/828c2c940e1141092bd4bb979cef547ea926d272.tar.gz",
integrity = "sha256-G+4c9c/vci1OyPrQJnQCI+ZCv/E0cWN4hrHDY3i7ns0=",
strip_prefix = "hoodi-b6ee51b2045a5e7fe3efac52534f75b080b049c6",
url = "https://github.com/eth-clients/hoodi/archive/b6ee51b2045a5e7fe3efac52534f75b080b049c6.tar.gz",
)

http_archive(
name = "com_google_protobuf",
sha256 = "9bd87b8280ef720d3240514f884e56a712f2218f0d693b48050c836028940a42",
strip_prefix = "protobuf-25.1",
sha256 = "7c3ebd7aaedd86fa5dc479a0fda803f602caaf78d8aff7ce83b89e1b8ae7442a",
strip_prefix = "protobuf-28.3",
urls = [
"https://github.com/protocolbuffers/protobuf/archive/v25.1.tar.gz",
"https://github.com/protocolbuffers/protobuf/archive/v28.3.tar.gz",
],
)
@@ -16,7 +16,7 @@ import (
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
"github.com/OffchainLabs/prysm/v6/config/features"
"github.com/OffchainLabs/prysm/v6/config/params"
consensusblocks "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
blocktypes "github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
payloadattribute "github.com/OffchainLabs/prysm/v6/consensus-types/payload-attribute"
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"

@@ -218,24 +218,18 @@ func (s *Service) getPayloadHash(ctx context.Context, root []byte) ([32]byte, er

// notifyNewPayload signals execution engine on a new payload.
// It returns true if the EL has returned VALID for the block
func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int,
preStateHeader interfaces.ExecutionData, blk interfaces.ReadOnlySignedBeaconBlock) (bool, error) {
// stVersion should represent the version of the pre-state; header should also be from the pre-state.
func (s *Service) notifyNewPayload(ctx context.Context, stVersion int, header interfaces.ExecutionData, blk blocktypes.ROBlock) (bool, error) {
ctx, span := trace.StartSpan(ctx, "blockChain.notifyNewPayload")
defer span.End()

// Execution payload is only supported in Bellatrix and beyond. Pre
// merge blocks are never optimistic
if blk == nil {
return false, errors.New("signed beacon block can't be nil")
}
if preStateVersion < version.Bellatrix {
if stVersion < version.Bellatrix {
return true, nil
}
if err := consensusblocks.BeaconBlockIsNil(blk); err != nil {
return false, err
}
body := blk.Block().Body()
enabled, err := blocks.IsExecutionEnabledUsingHeader(preStateHeader, body)
enabled, err := blocks.IsExecutionEnabledUsingHeader(header, body)
if err != nil {
return false, errors.Wrap(invalidBlock{error: err}, "could not determine if execution is enabled")
}

@@ -268,28 +262,32 @@ func (s *Service) notifyNewPayload(ctx context.Context, preStateVersion int,
return false, errors.New("nil execution requests")
}
}
lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes, parentRoot, requests)

switch {
case err == nil:
lastValidHash, err = s.cfg.ExecutionEngineCaller.NewPayload(ctx, payload, versionedHashes, parentRoot, requests)
if err == nil {
newPayloadValidNodeCount.Inc()
return true, nil
case errors.Is(err, execution.ErrAcceptedSyncingPayloadStatus):
}
logFields := logrus.Fields{
"slot": blk.Block().Slot(),
"parentRoot": fmt.Sprintf("%#x", parentRoot),
"root": fmt.Sprintf("%#x", blk.Root()),
"payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
}
if errors.Is(err, execution.ErrAcceptedSyncingPayloadStatus) {
newPayloadOptimisticNodeCount.Inc()
log.WithFields(logrus.Fields{
"slot": blk.Block().Slot(),
"payloadBlockHash": fmt.Sprintf("%#x", bytesutil.Trunc(payload.BlockHash())),
}).Info("Called new payload with optimistic block")
log.WithFields(logFields).Info("Called new payload with optimistic block")
return false, nil
case errors.Is(err, execution.ErrInvalidPayloadStatus):
lvh := bytesutil.ToBytes32(lastValidHash)
}
if errors.Is(err, execution.ErrInvalidPayloadStatus) {
log.WithFields(logFields).WithError(err).Error("Invalid payload status")
return false, invalidBlock{
error: ErrInvalidPayload,
lastValidHash: lvh,
lastValidHash: bytesutil.ToBytes32(lastValidHash),
}
default:
return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
}
log.WithFields(logFields).WithError(err).Error("Unexpected execution engine error")
return false, errors.WithMessage(ErrUndefinedExecutionEngineError, err.Error())
}

// reportInvalidBlock deals with the event that an invalid block was detected by the execution layer
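The hunk above replaces the switch on the engine's NewPayload error with a linear if-chain that shares one set of log fields. A minimal, self-contained sketch of the same status-to-result mapping follows; the sentinel errors and the newPayload callback are hypothetical stand-ins, not Prysm's actual types.

package main

import (
	"errors"
	"fmt"
)

// Hypothetical sentinels standing in for the execution package's
// ErrAcceptedSyncingPayloadStatus and ErrInvalidPayloadStatus.
var (
	errSyncing = errors.New("payload accepted, EL still syncing")
	errInvalid = errors.New("invalid payload status")
)

// notifyNewPayload mirrors the refactored control flow: nil error means the
// EL returned VALID, syncing is treated as optimistic (no error), an invalid
// status is surfaced as an error, and anything else is an undefined engine error.
func notifyNewPayload(newPayload func() error) (bool, error) {
	err := newPayload()
	if err == nil {
		return true, nil // EL returned VALID.
	}
	if errors.Is(err, errSyncing) {
		fmt.Println("called new payload with optimistic block")
		return false, nil
	}
	if errors.Is(err, errInvalid) {
		return false, fmt.Errorf("invalid payload: %w", err)
	}
	return false, fmt.Errorf("undefined execution engine error: %w", err)
}

func main() {
	valid, err := notifyNewPayload(func() error { return errSyncing })
	fmt.Println(valid, err) // false <nil>: the block is treated as optimistic.
}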
@@ -481,33 +481,12 @@ func Test_NotifyNewPayload(t *testing.T) {
phase0State, _ := util.DeterministicGenesisState(t, 1)
altairState, _ := util.DeterministicGenesisStateAltair(t, 1)
bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)
a := &ethpb.SignedBeaconBlockAltair{
Block: &ethpb.BeaconBlockAltair{
Body: &ethpb.BeaconBlockBodyAltair{},
},
}
a := util.NewBeaconBlockAltair()
altairBlk, err := consensusblocks.NewSignedBeaconBlock(a)
require.NoError(t, err)
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Slot: 1,
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{
BlockNumber: 1,
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
ExtraData: make([]byte, 0),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
Transactions: make([][]byte, 0),
},
},
},
}
blk := util.NewBeaconBlockBellatrix()
blk.Block.Slot = 1
blk.Block.Body.ExecutionPayload.BlockNumber = 1
bellatrixBlk, err := consensusblocks.NewSignedBeaconBlock(util.HydrateSignedBeaconBlockBellatrix(blk))
require.NoError(t, err)
st := params.BeaconConfig().SlotsPerEpoch.Mul(uint64(epochsSinceFinalitySaveHotStateDB))

@@ -544,12 +523,6 @@ func Test_NotifyNewPayload(t *testing.T) {
blk: altairBlk,
isValidPayload: true,
},
{
name: "nil beacon block",
postState: bellatrixState,
errString: "signed beacon block can't be nil",
isValidPayload: false,
},
{
name: "new payload with optimistic block",
postState: bellatrixState,

@@ -576,15 +549,8 @@ func Test_NotifyNewPayload(t *testing.T) {
name: "altair pre state, happy case",
postState: bellatrixState,
blk: func() interfaces.ReadOnlySignedBeaconBlock {
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
},
},
},
}
blk := util.NewBeaconBlockBellatrix()
blk.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
b, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
return b

@@ -595,24 +561,7 @@ func Test_NotifyNewPayload(t *testing.T) {
name: "not at merge transition",
postState: bellatrixState,
blk: func() interfaces.ReadOnlySignedBeaconBlock {
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{
ParentHash: make([]byte, fieldparams.RootLength),
FeeRecipient: make([]byte, fieldparams.FeeRecipientLength),
StateRoot: make([]byte, fieldparams.RootLength),
ReceiptsRoot: make([]byte, fieldparams.RootLength),
LogsBloom: make([]byte, fieldparams.LogsBloomLength),
PrevRandao: make([]byte, fieldparams.RootLength),
ExtraData: make([]byte, 0),
BaseFeePerGas: make([]byte, fieldparams.RootLength),
BlockHash: make([]byte, fieldparams.RootLength),
Transactions: make([][]byte, 0),
},
},
},
}
blk := util.NewBeaconBlockBellatrix()
b, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
return b

@@ -623,15 +572,8 @@ func Test_NotifyNewPayload(t *testing.T) {
name: "happy case",
postState: bellatrixState,
blk: func() interfaces.ReadOnlySignedBeaconBlock {
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
},
},
},
}
blk := util.NewBeaconBlockBellatrix()
blk.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
b, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
return b

@@ -642,15 +584,8 @@ func Test_NotifyNewPayload(t *testing.T) {
name: "undefined error from ee",
postState: bellatrixState,
blk: func() interfaces.ReadOnlySignedBeaconBlock {
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
},
},
},
}
blk := util.NewBeaconBlockBellatrix()
blk.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
b, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
return b

@@ -662,15 +597,8 @@ func Test_NotifyNewPayload(t *testing.T) {
name: "invalid block hash error from ee",
postState: bellatrixState,
blk: func() interfaces.ReadOnlySignedBeaconBlock {
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
},
},
},
}
blk := util.NewBeaconBlockBellatrix()
blk.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
b, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
return b

@@ -701,7 +629,9 @@ func Test_NotifyNewPayload(t *testing.T) {
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, state, blkRoot))
postVersion, postHeader, err := getStateVersionAndPayload(tt.postState)
require.NoError(t, err)
isValidPayload, err := service.notifyNewPayload(ctx, postVersion, postHeader, tt.blk)
rob, err := consensusblocks.NewROBlock(tt.blk)
require.NoError(t, err)
isValidPayload, err := service.notifyNewPayload(ctx, postVersion, postHeader, rob)
if tt.errString != "" {
require.ErrorContains(t, tt.errString, err)
if tt.invalidBlock {

@@ -725,17 +655,12 @@ func Test_NotifyNewPayload_SetOptimisticToValid(t *testing.T) {
ctx := tr.ctx

bellatrixState, _ := util.DeterministicGenesisStateBellatrix(t, 2)
blk := &ethpb.SignedBeaconBlockBellatrix{
Block: &ethpb.BeaconBlockBellatrix{
Body: &ethpb.BeaconBlockBodyBellatrix{
ExecutionPayload: &v1.ExecutionPayload{
ParentHash: bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength),
},
},
},
}
blk := util.NewBeaconBlockBellatrix()
blk.Block.Body.ExecutionPayload.ParentHash = bytesutil.PadTo([]byte{'a'}, fieldparams.RootLength)
bellatrixBlk, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
rob, err := consensusblocks.NewROBlock(bellatrixBlk)
require.NoError(t, err)
e := &mockExecution.EngineClient{BlockByHashMap: map[[32]byte]*v1.ExecutionBlock{}}
e.BlockByHashMap[[32]byte{'a'}] = &v1.ExecutionBlock{
Header: gethtypes.Header{

@@ -752,7 +677,7 @@ func Test_NotifyNewPayload_SetOptimisticToValid(t *testing.T) {
service.cfg.ExecutionEngineCaller = e
postVersion, postHeader, err := getStateVersionAndPayload(bellatrixState)
require.NoError(t, err)
validated, err := service.notifyNewPayload(ctx, postVersion, postHeader, bellatrixBlk)
validated, err := service.notifyNewPayload(ctx, postVersion, postHeader, rob)
require.NoError(t, err)
require.Equal(t, true, validated)
}
@@ -109,6 +109,7 @@ func VerifyCellKZGProofBatch(commitmentsBytes []Bytes48, cellIndices []uint64, c
}

// RecoverCellsAndKZGProofs recovers the complete cells and KZG proofs from a given set of cell indices and partial cells.
// Note: `len(cellIndices)` must be equal to `len(partialCells)` and `cellIndices` must be sorted in ascending order.
func RecoverCellsAndKZGProofs(cellIndices []uint64, partialCells []Cell) (CellsAndProofs, error) {
// Convert `Cell` type to `ckzg4844.Cell`
ckzgPartialCells := make([]ckzg4844.Cell, len(partialCells))
@@ -3,7 +3,6 @@ package blockchain
import (
"context"
"fmt"
"slices"
"time"

"github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"

@@ -73,7 +72,13 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
if features.Get().EnableLightClient && slots.ToEpoch(s.CurrentSlot()) >= params.BeaconConfig().AltairForkEpoch {
defer s.processLightClientUpdates(cfg)
}
defer s.sendStateFeedOnBlock(cfg)
// Track whether block processing succeeded to only send event on success
var blockProcessed bool
defer func() {
if blockProcessed {
s.sendStateFeedOnBlock(cfg)
}
}()
defer reportProcessingTime(startTime)
defer reportAttestationInclusion(cfg.roblock.Block())

@@ -102,16 +107,22 @@ func (s *Service) postBlockProcess(cfg *postBlockProcessConfig) error {
newBlockHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
if cfg.headRoot != cfg.roblock.Root() {
s.logNonCanonicalBlockReceived(cfg.roblock.Root(), cfg.headRoot)
// Mark as processed even for non-canonical blocks that succeed
blockProcessed = true
return nil
}
if err := s.getFCUArgs(cfg, fcuArgs); err != nil {
log.WithError(err).Error("Could not get forkchoice update argument")
// Mark as processed - block was successfully inserted even if FCU args failed
blockProcessed = true
return nil
}
if err := s.sendFCU(cfg, fcuArgs); err != nil {
return errors.Wrap(err, "could not send FCU to engine")
}

// Block successfully processed
blockProcessed = true
return nil
}
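The change above gates the state-feed notification behind a blockProcessed flag, so the BlockProcessed event only fires when processing reached a success path. A stripped-down sketch of that deferred-flag pattern, with hypothetical function names rather than the real service, is:

package main

import (
	"errors"
	"fmt"
)

// process emits the "processed" event only when the function reaches a
// success path; an early error return leaves the flag false and the
// deferred closure does nothing.
func process(insert func() error, emit func()) error {
	var processed bool
	defer func() {
		if processed {
			emit()
		}
	}()

	if err := insert(); err != nil {
		return errors.New("could not insert block") // no event sent
	}
	processed = true
	return nil
}

func main() {
	_ = process(func() error { return nil }, func() { fmt.Println("block processed event") })
	_ = process(func() error { return errors.New("boom") }, func() { fmt.Println("never printed") })
}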
@@ -746,14 +757,14 @@ func (s *Service) areDataColumnsAvailable(
}

// Get a map of data column indices that are not currently available.
missingMap, err := missingDataColumnIndices(s.dataColumnStorage, root, peerInfo.CustodyColumns)
missing, err := missingDataColumnIndices(s.dataColumnStorage, root, peerInfo.CustodyColumns)
if err != nil {
return errors.Wrap(err, "missing data columns")
}

// If there are no missing indices, all data column sidecars are available.
// This is the happy path.
if len(missingMap) == 0 {
if len(missing) == 0 {
return nil
}

@@ -770,33 +781,17 @@ func (s *Service) areDataColumnsAvailable(
// Avoid logging if DA check is called after next slot start.
if nextSlot.After(time.Now()) {
timer := time.AfterFunc(time.Until(nextSlot), func() {
missingMapCount := uint64(len(missingMap))
missingCount := uint64(len(missing))

if missingMapCount == 0 {
if missingCount == 0 {
return
}

var (
expected interface{} = "all"
missing interface{} = "all"
)

numberOfColumns := params.BeaconConfig().NumberOfColumns
colMapCount := uint64(len(peerInfo.CustodyColumns))

if colMapCount < numberOfColumns {
expected = uint64MapToSortedSlice(peerInfo.CustodyColumns)
}

if missingMapCount < numberOfColumns {
missing = uint64MapToSortedSlice(missingMap)
}

log.WithFields(logrus.Fields{
"slot": block.Slot(),
"root": fmt.Sprintf("%#x", root),
"columnsExpected": expected,
"columnsWaiting": missing,
"columnsExpected": helpers.SortedPrettySliceFromMap(peerInfo.CustodyColumns),
"columnsWaiting": helpers.SortedPrettySliceFromMap(missing),
}).Warning("Data columns still missing at slot end")
})
defer timer.Stop()

@@ -812,7 +807,7 @@ func (s *Service) areDataColumnsAvailable(

for _, index := range idents.Indices {
// This is a data column we are expecting.
if _, ok := missingMap[index]; ok {
if _, ok := missing[index]; ok {
storedDataColumnsCount++
}

@@ -823,10 +818,10 @@ func (s *Service) areDataColumnsAvailable(
}

// Remove the index from the missing map.
delete(missingMap, index)
delete(missing, index)

// Return if there is no more missing data columns.
if len(missingMap) == 0 {
if len(missing) == 0 {
return nil
}
}

@@ -834,10 +829,10 @@ func (s *Service) areDataColumnsAvailable(
case <-ctx.Done():
var missingIndices interface{} = "all"
numberOfColumns := params.BeaconConfig().NumberOfColumns
missingIndicesCount := uint64(len(missingMap))
missingIndicesCount := uint64(len(missing))

if missingIndicesCount < numberOfColumns {
missingIndices = uint64MapToSortedSlice(missingMap)
missingIndices = helpers.SortedPrettySliceFromMap(missing)
}

return errors.Wrapf(ctx.Err(), "data column sidecars slot: %d, BlockRoot: %#x, missing: %v", block.Slot(), root, missingIndices)

@@ -921,16 +916,6 @@ func (s *Service) areBlobsAvailable(ctx context.Context, root [fieldparams.RootL
}
}

// uint64MapToSortedSlice produces a sorted uint64 slice from a map.
func uint64MapToSortedSlice(input map[uint64]bool) []uint64 {
output := make([]uint64, 0, len(input))
for idx := range input {
output = append(output, idx)
}
slices.Sort[[]uint64](output)
return output
}

// lateBlockTasks is called 4 seconds into the slot and performs tasks
// related to late blocks. It emits a MissedSlot state feed event.
// It calls FCU and sets the right attributes if we are proposing next slot
@@ -9,8 +9,10 @@ import (
"testing"
"time"

mock "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/blocks"
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"

@@ -3462,3 +3464,156 @@ func TestProcessLightClientFinalityUpdate(t *testing.T) {
}
}
}

// Test_postBlockProcess_EventSending tests that block processed events are only sent
// when block processing succeeds according to the decision tree:
//
// Block Processing Flow:
// ├─ InsertNode FAILS (fork choice timeout)
// │  └─ blockProcessed = false ❌ NO EVENT
// │
// ├─ InsertNode succeeds
// │  ├─ handleBlockAttestations FAILS
// │  │  └─ blockProcessed = false ❌ NO EVENT
// │  │
// │  ├─ Block is NON-CANONICAL (not head)
// │  │  └─ blockProcessed = true ✅ SEND EVENT (Line 111)
// │  │
// │  ├─ Block IS CANONICAL (new head)
// │  │  ├─ getFCUArgs FAILS
// │  │  │  └─ blockProcessed = true ✅ SEND EVENT (Line 117)
// │  │  │
// │  │  ├─ sendFCU FAILS
// │  │  │  └─ blockProcessed = false ❌ NO EVENT
// │  │  │
// │  │  └─ Full success
// │  │     └─ blockProcessed = true ✅ SEND EVENT (Line 125)
func Test_postBlockProcess_EventSending(t *testing.T) {
ctx := context.Background()

// Helper to create a minimal valid block and state
createTestBlockAndState := func(t *testing.T, slot primitives.Slot, parentRoot [32]byte) (consensusblocks.ROBlock, state.BeaconState) {
st, _ := util.DeterministicGenesisState(t, 64)
require.NoError(t, st.SetSlot(slot))

stateRoot, err := st.HashTreeRoot(ctx)
require.NoError(t, err)

blk := util.NewBeaconBlock()
blk.Block.Slot = slot
blk.Block.ProposerIndex = 0
blk.Block.ParentRoot = parentRoot[:]
blk.Block.StateRoot = stateRoot[:]

signed := util.HydrateSignedBeaconBlock(blk)
roBlock, err := consensusblocks.NewSignedBeaconBlock(signed)
require.NoError(t, err)

roBlk, err := consensusblocks.NewROBlock(roBlock)
require.NoError(t, err)
return roBlk, st
}

tests := []struct {
name string
setupService func(*Service, [32]byte)
expectEvent bool
expectError bool
errorContains string
}{
{
name: "Block successfully processed - sends event",
setupService: func(s *Service, blockRoot [32]byte) {
// Default setup should work
},
expectEvent: true,
expectError: false,
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Create service with required options
opts := testServiceOptsWithDB(t)
service, err := NewService(ctx, opts...)
require.NoError(t, err)

// Initialize fork choice with genesis block
st, _ := util.DeterministicGenesisState(t, 64)
require.NoError(t, st.SetSlot(0))
genesisBlock := util.NewBeaconBlock()
genesisBlock.Block.StateRoot = bytesutil.PadTo([]byte("genesisState"), 32)
signedGenesis := util.HydrateSignedBeaconBlock(genesisBlock)
block, err := consensusblocks.NewSignedBeaconBlock(signedGenesis)
require.NoError(t, err)
genesisRoot, err := block.Block().HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.cfg.BeaconDB.SaveBlock(ctx, block))
require.NoError(t, service.cfg.BeaconDB.SaveGenesisBlockRoot(ctx, genesisRoot))
require.NoError(t, service.cfg.BeaconDB.SaveState(ctx, st, genesisRoot))

genesisROBlock, err := consensusblocks.NewROBlock(block)
require.NoError(t, err)
require.NoError(t, service.cfg.ForkChoiceStore.InsertNode(ctx, st, genesisROBlock))

// Create test block and state with genesis as parent
roBlock, postSt := createTestBlockAndState(t, 100, genesisRoot)

// Apply additional service setup if provided
if tt.setupService != nil {
tt.setupService(service, roBlock.Root())
}

// Create post block process config
cfg := &postBlockProcessConfig{
ctx: ctx,
roblock: roBlock,
postState: postSt,
isValidPayload: true,
}

// Execute postBlockProcess
err = service.postBlockProcess(cfg)

// Check error expectation
if tt.expectError {
require.NotNil(t, err)
if tt.errorContains != "" {
require.ErrorContains(t, tt.errorContains, err)
}
} else {
require.NoError(t, err)
}

// Give a moment for deferred functions to execute
time.Sleep(10 * time.Millisecond)

// Check event expectation
notifier := service.cfg.StateNotifier.(*mock.MockStateNotifier)
events := notifier.ReceivedEvents()

if tt.expectEvent {
require.NotEqual(t, 0, len(events), "Expected event to be sent but none were received")

// Verify it's a BlockProcessed event
foundBlockProcessed := false
for _, evt := range events {
if evt.Type == statefeed.BlockProcessed {
foundBlockProcessed = true
data, ok := evt.Data.(*statefeed.BlockProcessedData)
require.Equal(t, true, ok, "Event data should be BlockProcessedData")
require.Equal(t, roBlock.Root(), data.BlockRoot, "Event should contain correct block root")
break
}
}
require.Equal(t, true, foundBlockProcessed, "Expected BlockProcessed event type")
} else {
// For no-event cases, verify no BlockProcessed events were sent
for _, evt := range events {
require.NotEqual(t, statefeed.BlockProcessed, evt.Type,
"Expected no BlockProcessed event but one was sent")
}
}
})
}
}
@@ -248,7 +248,7 @@ func (s *Service) handleDA(ctx context.Context, avs das.AvailabilityStore, block
err = s.isDataAvailable(ctx, block)
}
elapsed := time.Since(start)
if err != nil {
if err == nil {
dataAvailWaitedTime.Observe(float64(elapsed.Milliseconds()))
}
return elapsed, err
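The one-line fix above flips the condition so the data-availability wait-time metric is observed only when the check succeeds. A small sketch of that observe-on-success pattern, with a plain callback standing in for the Prometheus histogram:

package main

import (
	"errors"
	"fmt"
	"time"
)

// handleDA times the availability check and only reports the latency when
// the check succeeded, mirroring the corrected condition.
func handleDA(check func() error, observe func(ms float64)) (time.Duration, error) {
	start := time.Now()
	err := check()
	elapsed := time.Since(start)
	if err == nil {
		observe(float64(elapsed.Milliseconds()))
	}
	return elapsed, err
}

func main() {
	_, err := handleDA(func() error { return nil }, func(ms float64) { fmt.Println("waited", ms, "ms") })
	fmt.Println(err)
	_, err = handleDA(func() error { return errors.New("missing blobs") }, func(float64) { fmt.Println("not observed") })
	fmt.Println(err)
}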
@@ -89,7 +89,7 @@ func (mb *mockBroadcaster) BroadcastLightClientFinalityUpdate(_ context.Context,
return nil
}

func (mb *mockBroadcaster) BroadcastDataColumnSidecar(_ uint64, _ blocks.VerifiedRODataColumn) error {
func (mb *mockBroadcaster) BroadcastDataColumnSidecars(_ context.Context, _ []blocks.VerifiedRODataColumn) error {
mb.broadcastCalled = true
return nil
}
@@ -42,6 +42,9 @@ func ProcessAttesterSlashings(
slashings []ethpb.AttSlashing,
exitInfo *validators.ExitInfo,
) (state.BeaconState, error) {
if exitInfo == nil && len(slashings) > 0 {
return nil, errors.New("exit info required to process attester slashings")
}
var err error
for _, slashing := range slashings {
beaconState, err = ProcessAttesterSlashing(ctx, beaconState, slashing, exitInfo)

@@ -59,6 +62,9 @@ func ProcessAttesterSlashing(
slashing ethpb.AttSlashing,
exitInfo *validators.ExitInfo,
) (state.BeaconState, error) {
if exitInfo == nil {
return nil, errors.New("exit info is required to process attester slashing")
}
if err := VerifyAttesterSlashing(ctx, beaconState, slashing); err != nil {
return nil, errors.Wrap(err, "could not verify attester slashing")
}

@@ -55,6 +55,9 @@ func ProcessVoluntaryExits(
if len(exits) == 0 {
return beaconState, nil
}
if exitInfo == nil {
return nil, errors.New("exit info required to process voluntary exits")
}
for idx, exit := range exits {
if exit == nil || exit.Exit == nil {
return nil, errors.New("nil voluntary exit in block body")
@@ -184,45 +184,54 @@ func NewGenesisBlockForState(ctx context.Context, st state.BeaconState) (interfa
})
case *ethpb.BeaconStateElectra:
return blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockElectra{
Block: &ethpb.BeaconBlockElectra{
ParentRoot: params.BeaconConfig().ZeroHash[:],
StateRoot: root[:],
Body: &ethpb.BeaconBlockBodyElectra{
RandaoReveal: make([]byte, 96),
Eth1Data: &ethpb.Eth1Data{
DepositRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
},
Graffiti: make([]byte, 32),
SyncAggregate: &ethpb.SyncAggregate{
SyncCommitteeBits: make([]byte, fieldparams.SyncCommitteeLength/8),
SyncCommitteeSignature: make([]byte, fieldparams.BLSSignatureLength),
},
ExecutionPayload: &enginev1.ExecutionPayloadDeneb{
ParentHash: make([]byte, 32),
FeeRecipient: make([]byte, 20),
StateRoot: make([]byte, 32),
ReceiptsRoot: make([]byte, 32),
LogsBloom: make([]byte, 256),
PrevRandao: make([]byte, 32),
ExtraData: make([]byte, 0),
BaseFeePerGas: make([]byte, 32),
BlockHash: make([]byte, 32),
Transactions: make([][]byte, 0),
Withdrawals: make([]*enginev1.Withdrawal, 0),
},
BlsToExecutionChanges: make([]*ethpb.SignedBLSToExecutionChange, 0),
BlobKzgCommitments: make([][]byte, 0),
ExecutionRequests: &enginev1.ExecutionRequests{
Withdrawals: make([]*enginev1.WithdrawalRequest, 0),
Deposits: make([]*enginev1.DepositRequest, 0),
Consolidations: make([]*enginev1.ConsolidationRequest, 0),
},
},
},
Block: electraGenesisBlock(root),
Signature: params.BeaconConfig().EmptySignature[:],
})
case *ethpb.BeaconStateFulu:
return blocks.NewSignedBeaconBlock(&ethpb.SignedBeaconBlockFulu{
Block: electraGenesisBlock(root),
Signature: params.BeaconConfig().EmptySignature[:],
})
default:
return nil, ErrUnrecognizedState
}
}

func electraGenesisBlock(root [fieldparams.RootLength]byte) *ethpb.BeaconBlockElectra {
return &ethpb.BeaconBlockElectra{
ParentRoot: params.BeaconConfig().ZeroHash[:],
StateRoot: root[:],
Body: &ethpb.BeaconBlockBodyElectra{
RandaoReveal: make([]byte, 96),
Eth1Data: &ethpb.Eth1Data{
DepositRoot: make([]byte, 32),
BlockHash: make([]byte, 32),
},
Graffiti: make([]byte, 32),
SyncAggregate: &ethpb.SyncAggregate{
SyncCommitteeBits: make([]byte, fieldparams.SyncCommitteeLength/8),
SyncCommitteeSignature: make([]byte, fieldparams.BLSSignatureLength),
},
ExecutionPayload: &enginev1.ExecutionPayloadDeneb{
ParentHash: make([]byte, 32),
FeeRecipient: make([]byte, 20),
StateRoot: make([]byte, 32),
ReceiptsRoot: make([]byte, 32),
LogsBloom: make([]byte, 256),
PrevRandao: make([]byte, 32),
ExtraData: make([]byte, 0),
BaseFeePerGas: make([]byte, 32),
BlockHash: make([]byte, 32),
Transactions: make([][]byte, 0),
Withdrawals: make([]*enginev1.Withdrawal, 0),
},
BlsToExecutionChanges: make([]*ethpb.SignedBLSToExecutionChange, 0),
BlobKzgCommitments: make([][]byte, 0),
ExecutionRequests: &enginev1.ExecutionRequests{
Withdrawals: make([]*enginev1.WithdrawalRequest, 0),
Deposits: make([]*enginev1.DepositRequest, 0),
Consolidations: make([]*enginev1.ConsolidationRequest, 0),
},
},
}
}
@@ -51,6 +51,9 @@ func ProcessProposerSlashings(
slashings []*ethpb.ProposerSlashing,
exitInfo *validators.ExitInfo,
) (state.BeaconState, error) {
if exitInfo == nil && len(slashings) > 0 {
return nil, errors.New("exit info required to process proposer slashings")
}
var err error
for _, slashing := range slashings {
beaconState, err = ProcessProposerSlashing(ctx, beaconState, slashing, exitInfo)

@@ -75,6 +78,9 @@ func ProcessProposerSlashing(
if err = VerifyProposerSlashing(beaconState, slashing); err != nil {
return nil, errors.Wrap(err, "could not verify proposer slashing")
}
if exitInfo == nil {
return nil, errors.New("exit info is required to process proposer slashing")
}
beaconState, err = validators.SlashValidator(ctx, beaconState, slashing.Header_1.Header.ProposerIndex, exitInfo)
if err != nil {
return nil, errors.Wrapf(err, "could not slash proposer index %d", slashing.Header_1.Header.ProposerIndex)
@@ -53,9 +53,15 @@ func ProcessOperations(ctx context.Context, st state.BeaconState, block interfac
// 6110 validations are in VerifyOperationLengths
bb := block.Body()
// Electra extends the altair operations.
exitInfo := v.ExitInformation(st)
if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
return nil, errors.Wrap(err, "could not update total active balance cache")
var exitInfo *v.ExitInfo
hasSlashings := len(bb.ProposerSlashings()) > 0 || len(bb.AttesterSlashings()) > 0
hasExits := len(bb.VoluntaryExits()) > 0
if hasSlashings || hasExits {
// ExitInformation is expensive to compute, only do it if we need it.
exitInfo = v.ExitInformation(st)
if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
return nil, errors.Wrap(err, "could not update total active balance cache")
}
}
st, err = ProcessProposerSlashings(ctx, st, bb.ProposerSlashings(), exitInfo)
if err != nil {
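The hunk above makes ProcessOperations compute the expensive exit information only when the block actually contains slashings or voluntary exits. A compact sketch of that lazy-computation guard, with a hypothetical exitInfo type standing in for validators.ExitInfo:

package main

import "fmt"

// exitInfo stands in for the expensive validators.ExitInfo computation.
type exitInfo struct{ totalActiveBalance uint64 }

// processOperations only pays for the exit-info computation when the block
// carries operations that need it, as in the diff above.
func processOperations(proposerSlashings, attesterSlashings, voluntaryExits int, compute func() *exitInfo) *exitInfo {
	var info *exitInfo
	hasSlashings := proposerSlashings > 0 || attesterSlashings > 0
	hasExits := voluntaryExits > 0
	if hasSlashings || hasExits {
		info = compute() // expensive, so done lazily
	}
	return info
}

func main() {
	calls := 0
	compute := func() *exitInfo { calls++; return &exitInfo{totalActiveBalance: 32_000_000_000} }
	processOperations(0, 0, 0, compute) // skipped: nothing to process
	processOperations(1, 0, 0, compute) // computed once
	fmt.Println("compute calls:", calls) // 1
}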
@@ -13,6 +13,7 @@ import (
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
enginev1 "github.com/OffchainLabs/prysm/v6/proto/engine/v1"
ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v6/runtime/version"
"github.com/OffchainLabs/prysm/v6/time/slots"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/pkg/errors"

@@ -91,6 +92,18 @@ func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []
ctx, span := trace.StartSpan(ctx, "electra.ProcessWithdrawalRequests")
defer span.End()
currentEpoch := slots.ToEpoch(st.Slot())
if len(wrs) == 0 {
return st, nil
}
// It is correct to compute exitInfo once for all withdrawals in the block, as the ExitInfo pointer is
// updated within InitiateValidatorExit which is the only function that uses it.
var exitInfo *validators.ExitInfo
if st.Version() < version.Electra {
exitInfo = validators.ExitInformation(st)
} else {
// After Electra, the function InitiateValidatorExit ignores the exitInfo passed to it and recomputes it anyway.
exitInfo = &validators.ExitInfo{}
}
for _, wr := range wrs {
if wr == nil {
return nil, errors.New("nil execution layer withdrawal request")

@@ -148,7 +161,8 @@ func ProcessWithdrawalRequests(ctx context.Context, st state.BeaconState, wrs []
// Only exit validator if it has no pending withdrawals in the queue
if pendingBalanceToWithdraw == 0 {
var err error
st, err = validators.InitiateValidatorExit(ctx, st, vIdx, validators.ExitInformation(st))
// exitInfo is updated within InitiateValidatorExit
st, err = validators.InitiateValidatorExit(ctx, st, vIdx, exitInfo)
if err != nil {
return nil, err
}
@@ -96,12 +96,17 @@ func ProcessRegistryUpdates(ctx context.Context, st state.BeaconState) (state.Be
}

// Process validators eligible for ejection.
for _, idx := range eligibleForEjection {
// Here is fine to do a quadratic loop since this should
// barely happen
st, err = validators.InitiateValidatorExit(ctx, st, idx, validators.ExitInformation(st))
if err != nil && !errors.Is(err, validators.ErrValidatorAlreadyExited) {
return nil, errors.Wrapf(err, "could not initiate exit for validator %d", idx)
if len(eligibleForEjection) > 0 {
// It is safe to compute exitInfo once for all ejections in the epoch, as the ExitInfo pointer is
// updated within InitiateValidatorExit which is the only function that uses it.
exitInfo := validators.ExitInformation(st)
for _, idx := range eligibleForEjection {
// Here is fine to do a quadratic loop since this should
// barely happen
st, err = validators.InitiateValidatorExit(ctx, st, idx, exitInfo)
if err != nil && !errors.Is(err, validators.ErrValidatorAlreadyExited) {
return nil, errors.Wrapf(err, "could not initiate exit for validator %d", idx)
}
}
}
@@ -10,6 +10,7 @@ go_library(
"legacy.go",
"metrics.go",
"randao.go",
"ranges.go",
"rewards_penalties.go",
"shuffle.go",
"sync_committee.go",

@@ -56,6 +57,7 @@ go_test(
"private_access_fuzz_noop_test.go", # keep
"private_access_test.go",
"randao_test.go",
"ranges_test.go",
"rewards_penalties_test.go",
"shuffle_test.go",
"sync_committee_test.go",
@@ -399,7 +399,6 @@ func CommitteeAssignments(ctx context.Context, state state.BeaconState, epoch pr
ctx, span := trace.StartSpan(ctx, "helpers.CommitteeAssignments")
defer span.End()

// Verify if the epoch is valid for assignment based on the provided state.
if err := VerifyAssignmentEpoch(epoch, state); err != nil {
return nil, err
}

@@ -407,12 +406,15 @@ func CommitteeAssignments(ctx context.Context, state state.BeaconState, epoch pr
if err != nil {
return nil, err
}
vals := make(map[primitives.ValidatorIndex]struct{})

// Deduplicate and make set for O(1) membership checks.
vals := make(map[primitives.ValidatorIndex]struct{}, len(validators))
for _, v := range validators {
vals[v] = struct{}{}
}
assignments := make(map[primitives.ValidatorIndex]*CommitteeAssignment)
// Compute committee assignments for each slot in the epoch.
remaining := len(vals)

assignments := make(map[primitives.ValidatorIndex]*CommitteeAssignment, len(vals))
for slot := startSlot; slot < startSlot+params.BeaconConfig().SlotsPerEpoch; slot++ {
committees, err := BeaconCommittees(ctx, state, slot)
if err != nil {

@@ -420,7 +422,7 @@ func CommitteeAssignments(ctx context.Context, state state.BeaconState, epoch pr
}
for j, committee := range committees {
for _, vIndex := range committee {
if _, ok := vals[vIndex]; !ok { // Skip if the validator is not in the provided validators slice.
if _, ok := vals[vIndex]; !ok {
continue
}
if _, ok := assignments[vIndex]; !ok {

@@ -429,6 +431,11 @@ func CommitteeAssignments(ctx context.Context, state state.BeaconState, epoch pr
assignments[vIndex].Committee = committee
assignments[vIndex].AttesterSlot = slot
assignments[vIndex].CommitteeIndex = primitives.CommitteeIndex(j)
delete(vals, vIndex)
remaining--
if remaining == 0 {
return assignments, nil // early exit
}
}
}
}
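The optimization above deduplicates the requested validators into a set, deletes each one as it is assigned, and stops scanning as soon as none remain. A self-contained sketch of the same early-exit lookup, simplified to plain uint64 indices and integer slot numbers (not the real committee types):

package main

import "fmt"

// assignFirstSlot mimics the optimized loop: requested validators go into a
// set for O(1) membership checks, each one is removed once assigned, and the
// scan returns early once the set is exhausted.
func assignFirstSlot(committees [][]uint64, requested []uint64) map[uint64]int {
	vals := make(map[uint64]struct{}, len(requested))
	for _, v := range requested {
		vals[v] = struct{}{}
	}
	remaining := len(vals)

	assignments := make(map[uint64]int, len(vals))
	for slot, committee := range committees {
		for _, v := range committee {
			if _, ok := vals[v]; !ok {
				continue
			}
			assignments[v] = slot
			delete(vals, v)
			remaining--
			if remaining == 0 {
				return assignments // early exit
			}
		}
	}
	return assignments
}

func main() {
	committees := [][]uint64{{0, 1}, {2, 3}, {4, 5}}
	fmt.Println(assignFirstSlot(committees, []uint64{1, 2})) // map[1:0 2:1]
}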
beacon-chain/core/helpers/ranges.go (new file, 62 lines)

@@ -0,0 +1,62 @@
package helpers

import (
"fmt"
"slices"
)

// SortedSliceFromMap takes a map with uint64 keys and returns a sorted slice of the keys.
func SortedSliceFromMap(toSort map[uint64]bool) []uint64 {
slice := make([]uint64, 0, len(toSort))
for key := range toSort {
slice = append(slice, key)
}

slices.Sort(slice)
return slice
}

// PrettySlice returns a pretty string representation of a sorted slice of uint64.
// `sortedSlice` must be sorted in ascending order.
// Example: [1,2,3,5,6,7,8,10] -> "1-3,5-8,10"
func PrettySlice(sortedSlice []uint64) string {
if len(sortedSlice) == 0 {
return ""
}

var result string
start := sortedSlice[0]
end := sortedSlice[0]

for i := 1; i < len(sortedSlice); i++ {
if sortedSlice[i] == end+1 {
end = sortedSlice[i]
continue
}

if start == end {
result += fmt.Sprintf("%d,", start)
start = sortedSlice[i]
end = sortedSlice[i]
continue
}

result += fmt.Sprintf("%d-%d,", start, end)
start = sortedSlice[i]
end = sortedSlice[i]
}

if start == end {
result += fmt.Sprintf("%d", start)
return result
}

result += fmt.Sprintf("%d-%d", start, end)
return result
}

// SortedPrettySliceFromMap combines SortedSliceFromMap and PrettySlice to return a pretty string representation of the keys in a map.
func SortedPrettySliceFromMap(toSort map[uint64]bool) string {
	sorted := SortedSliceFromMap(toSort)
	return PrettySlice(sorted)
}
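A short usage sketch of the helpers added above, assuming the package is importable at the path its test file uses; the expected outputs follow from the documented range-collapsing behaviour.

package main

import (
	"fmt"

	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
)

func main() {
	// Keys are column indices; the bool values are ignored by the helpers.
	missing := map[uint64]bool{10: true, 3: true, 4: true, 5: true, 9: true}

	sorted := helpers.SortedSliceFromMap(missing)
	fmt.Println(sorted) // [3 4 5 9 10]

	// Consecutive values collapse into "start-end" ranges.
	fmt.Println(helpers.PrettySlice(sorted))               // 3-5,9-10
	fmt.Println(helpers.SortedPrettySliceFromMap(missing)) // 3-5,9-10
}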
beacon-chain/core/helpers/ranges_test.go (new file, 64 lines)

@@ -0,0 +1,64 @@
package helpers_test

import (
"testing"

"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
"github.com/OffchainLabs/prysm/v6/testing/require"
)

func TestSortedSliceFromMap(t *testing.T) {
input := map[uint64]bool{5: true, 3: true, 8: true, 1: true}
expected := []uint64{1, 3, 5, 8}

actual := helpers.SortedSliceFromMap(input)
require.Equal(t, len(expected), len(actual))

for i := range expected {
require.Equal(t, expected[i], actual[i])
}
}

func TestPrettySlice(t *testing.T) {
tests := []struct {
name string
input []uint64
expected string
}{
{
name: "empty slice",
input: []uint64{},
expected: "",
},
{
name: "only distinct elements",
input: []uint64{1, 3, 5, 7, 9},
expected: "1,3,5,7,9",
},
{
name: "single range",
input: []uint64{1, 2, 3, 4, 5},
expected: "1-5",
},
{
name: "multiple ranges and distinct elements",
input: []uint64{1, 2, 3, 5, 6, 7, 8, 10, 12, 13, 14},
expected: "1-3,5-8,10,12-14",
},
}

for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
actual := helpers.PrettySlice(tt.input)
require.Equal(t, tt.expected, actual)
})
}
}

func TestSortedPrettySliceFromMap(t *testing.T) {
input := map[uint64]bool{5: true, 7: true, 8: true, 10: true}
expected := "5,7-8,10"

actual := helpers.SortedPrettySliceFromMap(input)
require.Equal(t, expected, actual)
}
@@ -1,6 +1,8 @@
|
||||
package peerdas
|
||||
|
||||
import (
|
||||
"sort"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/kzg"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
@@ -28,7 +30,8 @@ func MinimumColumnCountToReconstruct() uint64 {
|
||||
|
||||
// ReconstructDataColumnSidecars reconstructs all the data column sidecars from the given input data column sidecars.
|
||||
// All input sidecars must be committed to the same block.
|
||||
// `inVerifiedRoSidecars` should contain enough (unique) sidecars to reconstruct the missing columns.
|
||||
// `inVerifiedRoSidecars` should contain enough sidecars to reconstruct the missing columns, and should not contain any duplicates.
|
||||
// WARNING: This function sorts `verifiedRoSidecars` in place by index.
|
||||
func ReconstructDataColumnSidecars(verifiedRoSidecars []blocks.VerifiedRODataColumn) ([]blocks.VerifiedRODataColumn, error) {
|
||||
// Check if there is at least one input sidecar.
|
||||
if len(verifiedRoSidecars) == 0 {
|
||||
@@ -51,18 +54,17 @@ func ReconstructDataColumnSidecars(verifiedRoSidecars []blocks.VerifiedRODataCol
|
||||
}
|
||||
}
|
||||
|
||||
// Deduplicate sidecars.
|
||||
sidecarByIndex := make(map[uint64]blocks.VerifiedRODataColumn, len(verifiedRoSidecars))
|
||||
for _, inVerifiedRoSidecar := range verifiedRoSidecars {
|
||||
sidecarByIndex[inVerifiedRoSidecar.Index] = inVerifiedRoSidecar
|
||||
}
|
||||
|
||||
// Check if there are enough sidecars to reconstruct the missing columns.
|
||||
sidecarCount := len(sidecarByIndex)
|
||||
sidecarCount := len(verifiedRoSidecars)
|
||||
if uint64(sidecarCount) < MinimumColumnCountToReconstruct() {
|
||||
return nil, ErrNotEnoughDataColumnSidecars
|
||||
}
|
||||
|
||||
// Sort the input sidecars by index.
|
||||
sort.Slice(verifiedRoSidecars, func(i, j int) bool {
|
||||
return verifiedRoSidecars[i].Index < verifiedRoSidecars[j].Index
|
||||
})
|
||||
|
||||
// Recover cells and compute proofs in parallel.
|
||||
var wg errgroup.Group
|
||||
cellsAndProofs := make([]kzg.CellsAndProofs, blobCount)
|
||||
@@ -71,10 +73,10 @@ func ReconstructDataColumnSidecars(verifiedRoSidecars []blocks.VerifiedRODataCol
|
||||
cellsIndices := make([]uint64, 0, sidecarCount)
|
||||
cells := make([]kzg.Cell, 0, sidecarCount)
|
||||
|
||||
for columnIndex, sidecar := range sidecarByIndex {
|
||||
for _, sidecar := range verifiedRoSidecars {
|
||||
cell := sidecar.Column[blobIndex]
|
||||
cells = append(cells, kzg.Cell(cell))
|
||||
cellsIndices = append(cellsIndices, columnIndex)
|
||||
cellsIndices = append(cellsIndices, sidecar.Index)
|
||||
}
|
||||
|
||||
// Recover the cells and proofs for the corresponding blob
|
||||
|
||||
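With the dedup map removed, the contract above pushes uniqueness onto the caller. One possible caller-side guard, sketched with identifiers that appear in this comparison (`MinimumColumnCountToReconstruct`, `ErrNotEnoughDataColumnSidecars`, `ReconstructDataColumnSidecars`); the helper name `dedupByIndex` is illustrative only:

package example

import (
	"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
)

// dedupByIndex keeps one sidecar per column index, preserving the first occurrence.
func dedupByIndex(sidecars []blocks.VerifiedRODataColumn) []blocks.VerifiedRODataColumn {
	seen := make(map[uint64]bool, len(sidecars))
	unique := make([]blocks.VerifiedRODataColumn, 0, len(sidecars))
	for _, sidecar := range sidecars {
		if seen[sidecar.Index] {
			continue
		}
		seen[sidecar.Index] = true
		unique = append(unique, sidecar)
	}
	return unique
}

// reconstructIfPossible applies the caller-side checks the updated contract expects
// before handing the sidecars to the reconstruction helper.
func reconstructIfPossible(sidecars []blocks.VerifiedRODataColumn) ([]blocks.VerifiedRODataColumn, error) {
	unique := dedupByIndex(sidecars)
	if uint64(len(unique)) < peerdas.MinimumColumnCountToReconstruct() {
		return nil, peerdas.ErrNotEnoughDataColumnSidecars
	}
	return peerdas.ReconstructDataColumnSidecars(unique)
}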
@@ -10,6 +10,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/electra"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition/interop"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
|
||||
v "github.com/OffchainLabs/prysm/v6/beacon-chain/core/validators"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/state"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
@@ -378,9 +379,16 @@ func ProcessBlockForStateRoot(
|
||||
func altairOperations(ctx context.Context, st state.BeaconState, beaconBlock interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
|
||||
var err error
|
||||
|
||||
exitInfo := v.ExitInformation(st)
|
||||
if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
|
||||
return nil, errors.Wrap(err, "could not update total active balance cache")
|
||||
hasSlashings := len(beaconBlock.Body().ProposerSlashings()) > 0 || len(beaconBlock.Body().AttesterSlashings()) > 0
|
||||
// exitInfo is only needed for voluntary exits pre Electra.
|
||||
hasExits := st.Version() < version.Electra && len(beaconBlock.Body().VoluntaryExits()) > 0
|
||||
exitInfo := &validators.ExitInfo{}
|
||||
if hasSlashings || hasExits {
|
||||
// ExitInformation is expensive to compute, only do it if we need it.
|
||||
exitInfo = v.ExitInformation(st)
|
||||
if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
|
||||
return nil, errors.Wrap(err, "could not update total active balance cache")
|
||||
}
|
||||
}
|
||||
st, err = b.ProcessProposerSlashings(ctx, st, beaconBlock.Body().ProposerSlashings(), exitInfo)
|
||||
if err != nil {
|
||||
@@ -407,10 +415,15 @@ func altairOperations(ctx context.Context, st state.BeaconState, beaconBlock int
|
||||
// This calls phase 0 block operations.
|
||||
func phase0Operations(ctx context.Context, st state.BeaconState, beaconBlock interfaces.ReadOnlyBeaconBlock) (state.BeaconState, error) {
|
||||
var err error
|
||||
|
||||
exitInfo := v.ExitInformation(st)
|
||||
if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
|
||||
return nil, errors.Wrap(err, "could not update total active balance cache")
|
||||
hasSlashings := len(beaconBlock.Body().ProposerSlashings()) > 0 || len(beaconBlock.Body().AttesterSlashings()) > 0
|
||||
hasExits := len(beaconBlock.Body().VoluntaryExits()) > 0
|
||||
var exitInfo *v.ExitInfo
|
||||
if hasSlashings || hasExits {
|
||||
// ExitInformation is expensive to compute, only do it if we need it.
|
||||
exitInfo = v.ExitInformation(st)
|
||||
if err := helpers.UpdateTotalActiveBalanceCache(st, exitInfo.TotalActiveBalance); err != nil {
|
||||
return nil, errors.Wrap(err, "could not update total active balance cache")
|
||||
}
|
||||
}
|
||||
st, err = b.ProcessProposerSlashings(ctx, st, beaconBlock.Body().ProposerSlashings(), exitInfo)
|
||||
if err != nil {
|
||||
|
||||
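Both operation paths above now guard the exit-information scan behind a cheap check on the block body. The same shape, distilled into a standalone sketch with hypothetical types:

package main

import "fmt"

type exitInfo struct{ totalActiveBalance uint64 }

// computeExitInfo stands in for the expensive full-validator-set scan.
func computeExitInfo() *exitInfo {
	fmt.Println("expensive scan")
	return &exitInfo{totalActiveBalance: 42}
}

func processBlock(hasSlashings, hasExits bool) {
	var info *exitInfo
	if hasSlashings || hasExits {
		// Only pay for the scan when an operation actually needs it.
		info = computeExitInfo()
	}
	_ = info // downstream processing tolerates a nil exit info when unused
}

func main() {
	processBlock(false, false) // no scan
	processBlock(true, false)  // scan happens once
}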
@@ -98,7 +98,9 @@ func InitiateValidatorExit(
|
||||
if validator.ExitEpoch != params.BeaconConfig().FarFutureEpoch {
|
||||
return s, ErrValidatorAlreadyExited
|
||||
}
|
||||
|
||||
if exitInfo == nil {
|
||||
return nil, errors.New("exit info is required to process validator exit")
|
||||
}
|
||||
// Compute exit queue epoch.
|
||||
if s.Version() < version.Electra {
|
||||
if err = initiateValidatorExitPreElectra(ctx, s, exitInfo); err != nil {
|
||||
@@ -177,6 +179,9 @@ func initiateValidatorExitPreElectra(ctx context.Context, s state.BeaconState, e
|
||||
// if exit_queue_churn >= get_validator_churn_limit(state):
|
||||
// exit_queue_epoch += Epoch(1)
|
||||
exitableEpoch := helpers.ActivationExitEpoch(time.CurrentEpoch(s))
|
||||
if exitInfo == nil {
|
||||
return errors.New("exit info is required to process validator exit")
|
||||
}
|
||||
if exitableEpoch > exitInfo.HighestExitEpoch {
|
||||
exitInfo.HighestExitEpoch = exitableEpoch
|
||||
exitInfo.Churn = 0
|
||||
@@ -235,7 +240,9 @@ func SlashValidator(
|
||||
exitInfo *ExitInfo,
|
||||
) (state.BeaconState, error) {
|
||||
var err error
|
||||
|
||||
if exitInfo == nil {
|
||||
return nil, errors.New("exit info is required to slash validator")
|
||||
}
|
||||
s, err = InitiateValidatorExitForTotalBal(ctx, s, slashedIdx, exitInfo, primitives.Gwei(exitInfo.TotalActiveBalance))
|
||||
if err != nil && !errors.Is(err, ErrValidatorAlreadyExited) {
|
||||
return nil, errors.Wrapf(err, "could not initiate validator %d exit", slashedIdx)
|
||||
|
||||
@@ -122,18 +122,18 @@ type BlobStorage struct {
|
||||
func (bs *BlobStorage) WarmCache() {
|
||||
start := time.Now()
|
||||
if bs.layoutName == LayoutNameFlat {
|
||||
log.Info("Blob filesystem cache warm-up started. This may take a few minutes.")
|
||||
log.Info("Blob filesystem cache warm-up started. This may take a few minutes")
|
||||
} else {
|
||||
log.Info("Blob filesystem cache warm-up started.")
|
||||
log.Info("Blob filesystem cache warm-up started")
|
||||
}
|
||||
|
||||
if err := warmCache(bs.layout, bs.cache); err != nil {
|
||||
log.WithError(err).Error("Error encountered while warming up blob filesystem cache.")
|
||||
log.WithError(err).Error("Error encountered while warming up blob filesystem cache")
|
||||
}
|
||||
if err := bs.migrateLayouts(); err != nil {
|
||||
log.WithError(err).Error("Error encountered while migrating blob storage.")
|
||||
log.WithError(err).Error("Error encountered while migrating blob storage")
|
||||
}
|
||||
log.WithField("elapsed", time.Since(start)).Info("Blob filesystem cache warm-up complete.")
|
||||
log.WithField("elapsed", time.Since(start)).Info("Blob filesystem cache warm-up complete")
|
||||
}
|
||||
|
||||
// If any blob storage directories are found for layouts besides the configured layout, migrate them.
|
||||
|
||||
@@ -4,17 +4,26 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/time/slots"
|
||||
"github.com/pkg/errors"
|
||||
bolt "go.etcd.io/bbolt"
|
||||
)
|
||||
|
||||
var errTimeOut = errors.New("operation timed out")
|
||||
|
||||
// PruneAttestationsAtEpoch deletes all attestations from the slasher DB with target epoch
|
||||
// less than or equal to the specified epoch.
|
||||
func (s *Store) PruneAttestationsAtEpoch(
|
||||
_ context.Context, maxEpoch primitives.Epoch,
|
||||
ctx context.Context, maxEpoch primitives.Epoch,
|
||||
) (numPruned uint, err error) {
|
||||
// In some cases, pruning may take a very long time and consume significant memory in the
|
||||
// open Update transaction. Therefore, we impose a 1 minute timeout on this operation.
|
||||
ctx, cancel := context.WithTimeout(ctx, 1*time.Minute)
|
||||
defer cancel()
|
||||
|
||||
// We can prune everything less than the current epoch - history length.
|
||||
encodedEndPruneEpoch := make([]byte, 8)
|
||||
binary.BigEndian.PutUint64(encodedEndPruneEpoch, uint64(maxEpoch))
|
||||
@@ -48,13 +57,18 @@ func (s *Store) PruneAttestationsAtEpoch(
|
||||
return
|
||||
}
|
||||
|
||||
if err = s.db.Update(func(tx *bolt.Tx) error {
|
||||
err = s.db.Update(func(tx *bolt.Tx) error {
|
||||
signingRootsBkt := tx.Bucket(attestationDataRootsBucket)
|
||||
attRecordsBkt := tx.Bucket(attestationRecordsBucket)
|
||||
c := signingRootsBkt.Cursor()
|
||||
|
||||
// We begin a pruning iteration starting from the first item in the bucket.
|
||||
for k, v := c.First(); k != nil; k, v = c.Next() {
|
||||
if ctx.Err() != nil {
|
||||
// Exit the routine if the context has expired.
|
||||
return errTimeOut
|
||||
}
|
||||
|
||||
// We check the epoch from the current key in the database.
|
||||
// If we have hit an epoch that is greater than the end epoch of the pruning process,
|
||||
// we then completely exit the process as we are done.
|
||||
@@ -67,18 +81,27 @@ func (s *Store) PruneAttestationsAtEpoch(
|
||||
// so it is possible we have a few adjacent objects that have the same slot, such as
|
||||
// (target_epoch = 3 ++ _) => encode(attestation)
|
||||
if err := signingRootsBkt.Delete(k); err != nil {
|
||||
return err
|
||||
return errors.Wrap(err, "delete attestation signing root")
|
||||
}
|
||||
if err := attRecordsBkt.Delete(v); err != nil {
|
||||
return err
|
||||
return errors.Wrap(err, "delete attestation record")
|
||||
}
|
||||
slasherAttestationsPrunedTotal.Inc()
|
||||
numPruned++
|
||||
}
|
||||
return nil
|
||||
}); err != nil {
|
||||
})
|
||||
|
||||
if errors.Is(err, errTimeOut) {
|
||||
log.Warning("Aborting pruning routine")
|
||||
return
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to prune attestations")
|
||||
return
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
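The pruning change above bounds a long bbolt `Update` with a context deadline and a sentinel error. A minimal sketch of that pattern; it assumes the bucket exists, and the function and variable names here are illustrative:

package example

import (
	"context"
	"errors"
	"time"

	bolt "go.etcd.io/bbolt"
)

var errTimedOut = errors.New("operation timed out")

// pruneWithDeadline deletes keys from bucketName until shouldStop reports true
// or the deadline hits. The cursor loop checks ctx.Err() and bails out with a
// sentinel error so the caller can tell a timeout apart from a real failure.
func pruneWithDeadline(db *bolt.DB, bucketName []byte, shouldStop func(k []byte) bool) (uint, error) {
	ctx, cancel := context.WithTimeout(context.Background(), time.Minute)
	defer cancel()

	var pruned uint
	err := db.Update(func(tx *bolt.Tx) error {
		c := tx.Bucket(bucketName).Cursor()
		for k, _ := c.First(); k != nil; k, _ = c.Next() {
			if ctx.Err() != nil {
				return errTimedOut
			}
			if shouldStop(k) {
				return nil
			}
			if err := c.Delete(); err != nil {
				return err
			}
			pruned++
		}
		return nil
	})
	if errors.Is(err, errTimedOut) {
		// bbolt rolls back the whole transaction when the closure errors,
		// so nothing was persisted; report a soft stop and let the next pass retry.
		return 0, nil
	}
	return pruned, err
}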
@@ -142,10 +142,9 @@ var ErrEmptyBlockHash = errors.New("Block hash is empty 0x0000...")
|
||||
func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionData, versionedHashes []common.Hash, parentBlockRoot *common.Hash, executionRequests *pb.ExecutionRequests) ([]byte, error) {
|
||||
ctx, span := trace.StartSpan(ctx, "powchain.engine-api-client.NewPayload")
|
||||
defer span.End()
|
||||
start := time.Now()
|
||||
defer func() {
|
||||
defer func(start time.Time) {
|
||||
newPayloadLatency.Observe(float64(time.Since(start).Milliseconds()))
|
||||
}()
|
||||
}(time.Now())
|
||||
|
||||
d := time.Now().Add(time.Duration(params.BeaconConfig().ExecutionEngineTimeoutValue) * time.Second)
|
||||
ctx, cancel := context.WithDeadline(ctx, d)
|
||||
@@ -183,7 +182,10 @@ func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionDa
|
||||
return nil, errors.New("unknown execution data type")
|
||||
}
|
||||
if result.ValidationError != "" {
|
||||
log.WithError(errors.New(result.ValidationError)).Error("Got a validation error in newPayload")
|
||||
log.WithField("status", result.Status.String()).
|
||||
WithField("parentRoot", fmt.Sprintf("%#x", parentBlockRoot)).
|
||||
WithError(errors.New(result.ValidationError)).
|
||||
Error("Got a validation error in newPayload")
|
||||
}
|
||||
switch result.Status {
|
||||
case pb.PayloadStatus_INVALID_BLOCK_HASH:
|
||||
@@ -195,7 +197,7 @@ func (s *Service) NewPayload(ctx context.Context, payload interfaces.ExecutionDa
|
||||
case pb.PayloadStatus_VALID:
|
||||
return result.LatestValidHash, nil
|
||||
default:
|
||||
return nil, ErrUnknownPayloadStatus
|
||||
return nil, errors.Wrapf(ErrUnknownPayloadStatus, "unknown payload status: %s", result.Status.String())
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
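The latency instrumentation above leans on defer argument evaluation: `time.Now()` is evaluated when the `defer` statement runs, so the deferred closure receives the start time without a separate local. A small standalone illustration:

package main

import (
	"fmt"
	"time"
)

func observe(d time.Duration) { fmt.Println("latency:", d) }

func doWork() {
	// time.Now() is evaluated here, when the defer is registered,
	// so the deferred call measures the full function duration.
	defer func(start time.Time) {
		observe(time.Since(start))
	}(time.Now())

	time.Sleep(10 * time.Millisecond)
}

func main() { doWork() }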
@@ -928,7 +928,7 @@ func TestClient_HTTP(t *testing.T) {
|
||||
wrappedPayload, err := blocks.WrappedExecutionPayload(execPayload)
|
||||
require.NoError(t, err)
|
||||
resp, err := client.NewPayload(ctx, wrappedPayload, []common.Hash{}, &common.Hash{}, nil)
|
||||
require.ErrorIs(t, ErrUnknownPayloadStatus, err)
|
||||
require.ErrorIs(t, err, ErrUnknownPayloadStatus)
|
||||
require.DeepEqual(t, []uint8(nil), resp)
|
||||
})
|
||||
t.Run(BlockByNumberMethod, func(t *testing.T) {
|
||||
|
||||
@@ -254,6 +254,7 @@ func (s *Store) getCacheUpdatesByPeriod(headBlock interfaces.ReadOnlySignedBeaco
|
||||
return updatesByPeriod, nil
|
||||
}
|
||||
|
||||
// SetLastFinalityUpdate should be used only for testing.
|
||||
func (s *Store) SetLastFinalityUpdate(update interfaces.LightClientFinalityUpdate, broadcast bool) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
@@ -283,6 +284,7 @@ func (s *Store) LastFinalityUpdate() interfaces.LightClientFinalityUpdate {
|
||||
return s.lastFinalityUpdate
|
||||
}
|
||||
|
||||
// SetLastOptimisticUpdate should be used only for testing.
|
||||
func (s *Store) SetLastOptimisticUpdate(update interfaces.LightClientOptimisticUpdate, broadcast bool) {
|
||||
s.mu.Lock()
|
||||
defer s.mu.Unlock()
|
||||
|
||||
@@ -162,6 +162,7 @@ go_test(
|
||||
"//cmd/beacon-chain/flags:go_default_library",
|
||||
"//config/fieldparams:go_default_library",
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//consensus-types/wrapper:go_default_library",
|
||||
|
||||
@@ -5,14 +5,18 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"slices"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/altair"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/crypto/hash"
|
||||
"github.com/OffchainLabs/prysm/v6/monitoring/tracing"
|
||||
"github.com/OffchainLabs/prysm/v6/monitoring/tracing/trace"
|
||||
@@ -306,86 +310,150 @@ func (s *Service) BroadcastLightClientFinalityUpdate(ctx context.Context, update
|
||||
return nil
|
||||
}
|
||||
|
||||
// BroadcastDataColumnSidecar broadcasts a data column to the p2p network, the message is assumed to be
|
||||
// broadcasted to the current fork and to the input column subnet.
|
||||
func (s *Service) BroadcastDataColumnSidecar(
|
||||
dataColumnSubnet uint64,
|
||||
dataColumnSidecar blocks.VerifiedRODataColumn,
|
||||
) error {
|
||||
// Add tracing to the function.
|
||||
ctx, span := trace.StartSpan(s.ctx, "p2p.BroadcastDataColumnSidecar")
|
||||
defer span.End()
|
||||
// BroadcastDataColumnSidecars broadcasts multiple data column sidecars to the p2p network, after ensuring
|
||||
// there is at least one peer in each needed subnet. If not, it will attempt to find one before broadcasting.
|
||||
// This function is non-blocking. It stops trying to broadcast a given sidecar when more than one slot has passed, or the context is
|
||||
// cancelled (whichever comes first).
|
||||
func (s *Service) BroadcastDataColumnSidecars(ctx context.Context, sidecars []blocks.VerifiedRODataColumn) error {
|
||||
// Increase the number of broadcast attempts.
|
||||
dataColumnSidecarBroadcastAttempts.Add(float64(len(sidecars)))
|
||||
|
||||
// Retrieve the current fork digest.
|
||||
forkDigest, err := s.currentForkDigest()
|
||||
if err != nil {
|
||||
err := errors.Wrap(err, "current fork digest")
|
||||
tracing.AnnotateError(span, err)
|
||||
return err
|
||||
return errors.Wrap(err, "current fork digest")
|
||||
}
|
||||
|
||||
// Non-blocking broadcast, with attempts to discover a column subnet peer if none available.
|
||||
go s.internalBroadcastDataColumnSidecar(ctx, dataColumnSubnet, dataColumnSidecar, forkDigest)
|
||||
go s.broadcastDataColumnSidecars(ctx, forkDigest, sidecars)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) internalBroadcastDataColumnSidecar(
|
||||
ctx context.Context,
|
||||
columnSubnet uint64,
|
||||
dataColumnSidecar blocks.VerifiedRODataColumn,
|
||||
forkDigest [fieldparams.VersionLength]byte,
|
||||
) {
|
||||
// Add tracing to the function.
|
||||
_, span := trace.StartSpan(ctx, "p2p.internalBroadcastDataColumnSidecar")
|
||||
defer span.End()
|
||||
|
||||
// Increase the number of broadcast attempts.
|
||||
dataColumnSidecarBroadcastAttempts.Inc()
|
||||
|
||||
// Define a one-slot length context timeout.
|
||||
secondsPerSlot := params.BeaconConfig().SecondsPerSlot
|
||||
oneSlot := time.Duration(secondsPerSlot) * time.Second
|
||||
ctx, cancel := context.WithTimeout(ctx, oneSlot)
|
||||
defer cancel()
|
||||
|
||||
// Build the topic corresponding to this column subnet and this fork digest.
|
||||
topic := dataColumnSubnetToTopic(columnSubnet, forkDigest)
|
||||
|
||||
// Compute the wrapped subnet index.
|
||||
wrappedSubIdx := columnSubnet + dataColumnSubnetVal
|
||||
|
||||
// Find peers if needed.
|
||||
if err := s.findPeersIfNeeded(ctx, wrappedSubIdx, DataColumnSubnetTopicFormat, forkDigest, columnSubnet); err != nil {
|
||||
log.WithError(err).Error("Failed to find peers for data column subnet")
|
||||
tracing.AnnotateError(span, err)
|
||||
// broadcastDataColumnSidecars broadcasts multiple data column sidecars to the p2p network, after ensuring
|
||||
// there is at least one peer in each needed subnet. If not, it will attempt to find one before broadcasting.
|
||||
// It returns when all broadcasts are complete, or the context is cancelled (whichever comes first).
|
||||
func (s *Service) broadcastDataColumnSidecars(ctx context.Context, forkDigest [fieldparams.VersionLength]byte, sidecars []blocks.VerifiedRODataColumn) {
|
||||
type rootAndIndex struct {
|
||||
root [fieldparams.RootLength]byte
|
||||
index uint64
|
||||
}
|
||||
|
||||
// Broadcast the data column sidecar to the network.
|
||||
if err := s.broadcastObject(ctx, dataColumnSidecar, topic); err != nil {
|
||||
log.WithError(err).Error("Failed to broadcast data column sidecar")
|
||||
tracing.AnnotateError(span, err)
|
||||
var (
|
||||
wg sync.WaitGroup
|
||||
timings sync.Map
|
||||
)
|
||||
|
||||
logLevel := logrus.GetLevel()
|
||||
|
||||
slotPerRoot := make(map[[fieldparams.RootLength]byte]primitives.Slot, 1)
|
||||
for _, sidecar := range sidecars {
|
||||
slotPerRoot[sidecar.BlockRoot()] = sidecar.Slot()
|
||||
|
||||
wg.Go(func() {
|
||||
// Add tracing to the function.
|
||||
ctx, span := trace.StartSpan(s.ctx, "p2p.broadcastDataColumnSidecars")
|
||||
defer span.End()
|
||||
|
||||
// Compute the subnet for this data column sidecar.
|
||||
subnet := peerdas.ComputeSubnetForDataColumnSidecar(sidecar.Index)
|
||||
|
||||
// Build the topic corresponding to this column subnet and fork digest.
|
||||
topic := dataColumnSubnetToTopic(subnet, forkDigest)
|
||||
|
||||
// Compute the wrapped subnet index.
|
||||
wrappedSubIdx := subnet + dataColumnSubnetVal
|
||||
|
||||
// Find peers if needed.
|
||||
if err := s.findPeersIfNeeded(ctx, wrappedSubIdx, DataColumnSubnetTopicFormat, forkDigest, subnet); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
log.WithError(err).Error("Cannot find peers if needed")
|
||||
return
|
||||
}
|
||||
|
||||
// Broadcast the data column sidecar to the network.
|
||||
if err := s.broadcastObject(ctx, sidecar, topic); err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
log.WithError(err).Error("Cannot broadcast data column sidecar")
|
||||
return
|
||||
}
|
||||
|
||||
// Increase the number of successful broadcasts.
|
||||
dataColumnSidecarBroadcasts.Inc()
|
||||
|
||||
// Record the timing for log purposes.
|
||||
if logLevel >= logrus.DebugLevel {
|
||||
root := sidecar.BlockRoot()
|
||||
timings.Store(rootAndIndex{root: root, index: sidecar.Index}, time.Now())
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// Wait for all broadcasts to finish.
|
||||
wg.Wait()
|
||||
|
||||
// The rest of this function is only for debug logging purposes.
|
||||
if logLevel < logrus.DebugLevel {
|
||||
return
|
||||
}
|
||||
|
||||
header := dataColumnSidecar.SignedBlockHeader.GetHeader()
|
||||
slot := header.GetSlot()
|
||||
|
||||
slotStartTime, err := slots.StartTime(s.genesisTime, slot)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Failed to convert slot to time")
|
||||
type logInfo struct {
|
||||
durationMin time.Duration
|
||||
durationMax time.Duration
|
||||
indices []uint64
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"slot": slot,
|
||||
"timeSinceSlotStart": time.Since(slotStartTime),
|
||||
"root": fmt.Sprintf("%#x", dataColumnSidecar.BlockRoot()),
|
||||
"columnSubnet": columnSubnet,
|
||||
"blobCount": len(dataColumnSidecar.Column),
|
||||
}).Debug("Broadcasted data column sidecar")
|
||||
logInfoPerRoot := make(map[[fieldparams.RootLength]byte]*logInfo, 1)
|
||||
|
||||
// Increase the number of successful broadcasts.
|
||||
dataColumnSidecarBroadcasts.Inc()
|
||||
timings.Range(func(key any, value any) bool {
|
||||
rootAndIndex, ok := key.(rootAndIndex)
|
||||
if !ok {
|
||||
log.Error("Could not cast key to rootAndIndex")
|
||||
return true
|
||||
}
|
||||
|
||||
broadcastTime, ok := value.(time.Time)
|
||||
if !ok {
|
||||
log.Error("Could not cast value to time.Time")
|
||||
return true
|
||||
}
|
||||
|
||||
slot, ok := slotPerRoot[rootAndIndex.root]
|
||||
if !ok {
|
||||
log.WithField("root", fmt.Sprintf("%#x", rootAndIndex.root)).Error("Could not find slot for root")
|
||||
return true
|
||||
}
|
||||
|
||||
duration, err := slots.SinceSlotStart(slot, s.genesisTime, broadcastTime)
|
||||
if err != nil {
|
||||
log.WithError(err).Error("Could not compute duration since slot start")
|
||||
return true
|
||||
}
|
||||
|
||||
info, ok := logInfoPerRoot[rootAndIndex.root]
|
||||
if !ok {
|
||||
logInfoPerRoot[rootAndIndex.root] = &logInfo{durationMin: duration, durationMax: duration, indices: []uint64{rootAndIndex.index}}
|
||||
return true
|
||||
}
|
||||
|
||||
info.durationMin = min(info.durationMin, duration)
|
||||
info.durationMax = max(info.durationMax, duration)
|
||||
info.indices = append(info.indices, rootAndIndex.index)
|
||||
|
||||
return true
|
||||
})
|
||||
|
||||
for root, info := range logInfoPerRoot {
|
||||
slices.Sort(info.indices)
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"root": fmt.Sprintf("%#x", root),
|
||||
"slot": slotPerRoot[root],
|
||||
"count": len(info.indices),
|
||||
"indices": helpers.PrettySlice(info.indices),
|
||||
"timeSinceSlotStartMin": info.durationMin,
|
||||
"timeSinceSlotStartMax": info.durationMax,
|
||||
}).Debug("Broadcasted data column sidecars")
|
||||
}
|
||||
}
|
||||
|
||||
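The broadcast fan-out above uses `sync.WaitGroup.Go` (Go 1.25+, matching the toolchain bump elsewhere in this comparison) and a `sync.Map` to collect per-item timings that are only summarized afterwards. A stripped-down sketch of that shape using only the standard library:

package main

import (
	"fmt"
	"sync"
	"time"
)

func main() {
	items := []string{"a", "b", "c"}

	var (
		wg      sync.WaitGroup
		timings sync.Map // item -> completion time
	)

	for _, item := range items {
		wg.Go(func() { // Go 1.25+: spawns the goroutine and tracks it in the WaitGroup
			time.Sleep(5 * time.Millisecond) // stand-in for a broadcast
			timings.Store(item, time.Now())
		})
	}
	wg.Wait()

	// Summarize after all goroutines finish, mirroring the debug logging above.
	timings.Range(func(k, v any) bool {
		fmt.Println(k, v.(time.Time).Format(time.RFC3339Nano))
		return true
	})
}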
func (s *Service) findPeersIfNeeded(
|
||||
|
||||
@@ -15,10 +15,10 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
|
||||
p2ptest "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v6/cmd/beacon-chain/flags"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/wrapper"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
@@ -60,7 +60,6 @@ func TestService_Broadcast(t *testing.T) {
|
||||
topic := "/eth2/%x/testing"
|
||||
// Set a test gossip mapping for testpb.TestSimpleMessage.
|
||||
GossipTypeMapping[reflect.TypeOf(msg)] = topic
|
||||
p.clock = startup.NewClock(p.genesisTime, bytesutil.ToBytes32(p.genesisValidatorsRoot))
|
||||
digest, err := p.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
topic = fmt.Sprintf(topic, digest)
|
||||
@@ -664,6 +663,8 @@ func TestService_BroadcastDataColumn(t *testing.T) {
|
||||
topicFormat = DataColumnSubnetTopicFormat
|
||||
)
|
||||
|
||||
ctx := t.Context()
|
||||
|
||||
// Load the KZG trust setup.
|
||||
err := kzg.Start()
|
||||
require.NoError(t, err)
|
||||
@@ -686,7 +687,7 @@ func TestService_BroadcastDataColumn(t *testing.T) {
|
||||
_, pkey, ipAddr := createHost(t, port)
|
||||
|
||||
service := &Service{
|
||||
ctx: t.Context(),
|
||||
ctx: ctx,
|
||||
host: p1.BHost,
|
||||
pubsub: p1.PubSub(),
|
||||
joinedTopics: map[string]*pubsub.Topic{},
|
||||
@@ -695,7 +696,7 @@ func TestService_BroadcastDataColumn(t *testing.T) {
|
||||
genesisValidatorsRoot: bytesutil.PadTo([]byte{'A'}, 32),
|
||||
subnetsLock: make(map[uint64]*sync.RWMutex),
|
||||
subnetsLockLock: sync.Mutex{},
|
||||
peers: peers.NewStatus(t.Context(), &peers.StatusConfig{ScorerParams: &scorers.Config{}}),
|
||||
peers: peers.NewStatus(ctx, &peers.StatusConfig{ScorerParams: &scorers.Config{}}),
|
||||
custodyInfo: &custodyInfo{},
|
||||
}
|
||||
|
||||
@@ -722,7 +723,7 @@ func TestService_BroadcastDataColumn(t *testing.T) {
|
||||
time.Sleep(50 * time.Millisecond)
|
||||
|
||||
// Broadcast to peers and wait.
|
||||
err = service.BroadcastDataColumnSidecar(subnet, verifiedRoSidecar)
|
||||
err = service.BroadcastDataColumnSidecars(ctx, []blocks.VerifiedRODataColumn{verifiedRoSidecar})
|
||||
require.NoError(t, err)
|
||||
|
||||
// Receive the message.
|
||||
|
||||
@@ -10,6 +10,8 @@ import (
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
var errNoCustodyInfo = errors.New("no custody info available")
|
||||
|
||||
var _ CustodyManager = (*Service)(nil)
|
||||
|
||||
// EarliestAvailableSlot returns the earliest available slot.
|
||||
@@ -30,7 +32,7 @@ func (s *Service) CustodyGroupCount() (uint64, error) {
|
||||
defer s.custodyInfoLock.Unlock()
|
||||
|
||||
if s.custodyInfo == nil {
|
||||
return 0, errors.New("no custody info available")
|
||||
return 0, errNoCustodyInfo
|
||||
}
|
||||
|
||||
return s.custodyInfo.groupCount, nil
|
||||
|
||||
@@ -79,7 +79,7 @@ func (quicProtocol) ENRKey() string { return quickProtocolEnrKey }
|
||||
func newListener(listenerCreator func() (*discover.UDPv5, error)) (*listenerWrapper, error) {
|
||||
rawListener, err := listenerCreator()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not create new listener")
|
||||
return nil, errors.Wrap(err, "create new listener")
|
||||
}
|
||||
return &listenerWrapper{
|
||||
listener: rawListener,
|
||||
@@ -536,7 +536,7 @@ func (s *Service) createListener(
|
||||
int(s.cfg.QUICPort),
|
||||
)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not create local node")
|
||||
return nil, errors.Wrap(err, "create local node")
|
||||
}
|
||||
|
||||
bootNodes := make([]*enode.Node, 0, len(s.cfg.Discv5BootStrapAddrs))
|
||||
@@ -604,13 +604,27 @@ func (s *Service) createLocalNode(
|
||||
localNode = initializeSyncCommSubnets(localNode)
|
||||
|
||||
if params.FuluEnabled() {
|
||||
custodyGroupCount, err := s.CustodyGroupCount()
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not retrieve custody group count")
|
||||
}
|
||||
// TODO: Replace this quick fix with a proper synchronization scheme (chan?)
|
||||
const delay = 1 * time.Second
|
||||
|
||||
custodyGroupCountEntry := peerdas.Cgc(custodyGroupCount)
|
||||
localNode.Set(custodyGroupCountEntry)
|
||||
var custodyGroupCount uint64
|
||||
|
||||
err := errNoCustodyInfo
|
||||
for errors.Is(err, errNoCustodyInfo) {
|
||||
custodyGroupCount, err = s.CustodyGroupCount()
|
||||
if errors.Is(err, errNoCustodyInfo) {
|
||||
log.WithField("delay", delay).Debug("No custody info available yet, retrying later")
|
||||
time.Sleep(delay)
|
||||
continue
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "retrieve custody group count")
|
||||
}
|
||||
|
||||
custodyGroupCountEntry := peerdas.Cgc(custodyGroupCount)
|
||||
localNode.Set(custodyGroupCountEntry)
|
||||
}
|
||||
}
|
||||
|
||||
if s.cfg != nil && s.cfg.HostAddress != "" {
|
||||
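The retry loop above polls with a fixed delay until custody info is published, and the TODO asks for a proper synchronization scheme. One possible shape, purely illustrative; the type and method names below are hypothetical, not Prysm APIs:

package example

import (
	"context"
	"sync"
)

// custodyState is one possible shape for the synchronization the TODO asks for:
// the writer closes `ready` exactly once, and readers wait on it (or their context).
type custodyState struct {
	once       sync.Once
	ready      chan struct{}
	groupCount uint64
}

func newCustodyState() *custodyState {
	return &custodyState{ready: make(chan struct{})}
}

// SetGroupCount publishes the custody group count and unblocks waiters.
func (c *custodyState) SetGroupCount(count uint64) {
	c.once.Do(func() {
		c.groupCount = count
		close(c.ready)
	})
}

// WaitGroupCount blocks until the count is available or ctx is done.
func (c *custodyState) WaitGroupCount(ctx context.Context) (uint64, error) {
	select {
	case <-c.ready:
		return c.groupCount, nil
	case <-ctx.Done():
		return 0, ctx.Err()
	}
}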
@@ -652,7 +666,7 @@ func (s *Service) startDiscoveryV5(
|
||||
}
|
||||
wrappedListener, err := newListener(createListener)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not create listener")
|
||||
return nil, errors.Wrap(err, "create listener")
|
||||
}
|
||||
record := wrappedListener.Self()
|
||||
|
||||
|
||||
@@ -52,7 +52,7 @@ type (
|
||||
BroadcastBlob(ctx context.Context, subnet uint64, blob *ethpb.BlobSidecar) error
|
||||
BroadcastLightClientOptimisticUpdate(ctx context.Context, update interfaces.LightClientOptimisticUpdate) error
|
||||
BroadcastLightClientFinalityUpdate(ctx context.Context, update interfaces.LightClientFinalityUpdate) error
|
||||
BroadcastDataColumnSidecar(columnSubnet uint64, dataColumnSidecar blocks.VerifiedRODataColumn) error
|
||||
BroadcastDataColumnSidecars(ctx context.Context, sidecars []blocks.VerifiedRODataColumn) error
|
||||
}
|
||||
|
||||
// SetStreamHandler configures p2p to handle streams of a certain topic ID.
|
||||
|
||||
@@ -14,7 +14,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/peers/scorers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/startup"
|
||||
"github.com/OffchainLabs/prysm/v6/config/features"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
@@ -92,7 +91,6 @@ type Service struct {
|
||||
peerDisconnectionTime *cache.Cache
|
||||
custodyInfo *custodyInfo
|
||||
custodyInfoLock sync.RWMutex // Lock access to custodyInfo
|
||||
clock *startup.Clock
|
||||
allForkDigests map[[4]byte]struct{}
|
||||
}
|
||||
|
||||
|
||||
@@ -169,7 +169,7 @@ func (*FakeP2P) BroadcastLightClientFinalityUpdate(_ context.Context, _ interfac
|
||||
}
|
||||
|
||||
// BroadcastDataColumnSidecar -- fake.
|
||||
func (*FakeP2P) BroadcastDataColumnSidecar(_ uint64, _ blocks.VerifiedRODataColumn) error {
|
||||
func (*FakeP2P) BroadcastDataColumnSidecars(_ context.Context, _ []blocks.VerifiedRODataColumn) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
|
||||
@@ -63,7 +63,7 @@ func (m *MockBroadcaster) BroadcastLightClientFinalityUpdate(_ context.Context,
|
||||
}
|
||||
|
||||
// BroadcastDataColumnSidecar broadcasts a data column for mock.
|
||||
func (m *MockBroadcaster) BroadcastDataColumnSidecar(uint64, blocks.VerifiedRODataColumn) error {
|
||||
func (m *MockBroadcaster) BroadcastDataColumnSidecars(context.Context, []blocks.VerifiedRODataColumn) error {
|
||||
m.BroadcastCalled.Store(true)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -233,7 +233,7 @@ func (p *TestP2P) BroadcastLightClientFinalityUpdate(_ context.Context, _ interf
|
||||
}
|
||||
|
||||
// BroadcastDataColumnSidecar broadcasts a data column for mock.
|
||||
func (p *TestP2P) BroadcastDataColumnSidecar(uint64, blocks.VerifiedRODataColumn) error {
|
||||
func (p *TestP2P) BroadcastDataColumnSidecars(context.Context, []blocks.VerifiedRODataColumn) error {
|
||||
p.BroadcastCalled.Store(true)
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -1465,8 +1465,7 @@ func (s *Server) GetBlockHeader(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
blk, err := s.Blocker.Block(ctx, []byte(blockID))
|
||||
ok := shared.WriteBlockFetchError(w, blk, err)
|
||||
if !ok {
|
||||
if !shared.WriteBlockFetchError(w, blk, err) {
|
||||
return
|
||||
}
|
||||
blockHeader, err := blk.Header()
|
||||
|
||||
@@ -58,12 +58,7 @@ func (s *Server) Blobs(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
blk, err := s.Blocker.Block(ctx, []byte(blockId))
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not fetch block: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if blk == nil {
|
||||
httputil.HandleError(w, "Block not found", http.StatusNotFound)
|
||||
if !shared.WriteBlockFetchError(w, blk, err) {
|
||||
return
|
||||
}
|
||||
|
||||
@@ -174,12 +169,7 @@ func (s *Server) GetBlobs(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
blk, err := s.Blocker.Block(ctx, []byte(blockId))
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not fetch block: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if blk == nil {
|
||||
httputil.HandleError(w, "Block not found", http.StatusNotFound)
|
||||
if !shared.WriteBlockFetchError(w, blk, err) {
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@@ -27,5 +27,7 @@ go_test(
|
||||
"//testing/require:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common:go_default_library",
|
||||
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
|
||||
"@com_github_sirupsen_logrus//:go_default_library",
|
||||
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
|
||||
],
|
||||
)
|
||||
|
||||
@@ -124,14 +124,28 @@ func convertValueForJSON(v reflect.Value, tag string) interface{} {
|
||||
if !v.Field(i).CanInterface() {
|
||||
continue // unexported
|
||||
}
|
||||
key := f.Tag.Get("json")
|
||||
if key == "" || key == "-" {
|
||||
jsonTag := f.Tag.Get("json")
|
||||
if jsonTag == "-" {
|
||||
continue
|
||||
}
|
||||
|
||||
// Parse JSON tag options (e.g., "fieldname,omitempty")
|
||||
parts := strings.Split(jsonTag, ",")
|
||||
key := parts[0]
|
||||
|
||||
if key == "" {
|
||||
key = f.Name
|
||||
}
|
||||
m[key] = convertValueForJSON(v.Field(i), tag)
|
||||
|
||||
fieldValue := convertValueForJSON(v.Field(i), tag)
|
||||
m[key] = fieldValue
|
||||
}
|
||||
return m
|
||||
|
||||
// ===== String =====
|
||||
case reflect.String:
|
||||
return v.String()
|
||||
|
||||
// ===== Default =====
|
||||
default:
|
||||
log.WithFields(log.Fields{
|
||||
|
||||
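The tag handling above now distinguishes `json:"-"` (skip the field), tag options such as `,omitempty` (strip them), and an empty tag (fall back to the field name). A small standalone sketch of the same parsing:

package main

import (
	"fmt"
	"reflect"
	"strings"
)

type entry struct {
	Epoch   string `json:"EPOCH"`
	Max     string `json:"MAX_BLOBS_PER_BLOCK,omitempty"`
	Skipped string `json:"-"`
	NoTag   string
}

func main() {
	t := reflect.TypeOf(entry{})
	for i := 0; i < t.NumField(); i++ {
		f := t.Field(i)
		tag := f.Tag.Get("json")
		if tag == "-" {
			continue // omitted entirely, like the json:"-" fields checked above
		}
		key := strings.Split(tag, ",")[0] // drop options such as ",omitempty"
		if key == "" {
			key = f.Name // fall back to the Go field name
		}
		fmt.Println(key)
	}
}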
@@ -8,6 +8,7 @@ import (
|
||||
"math"
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"reflect"
|
||||
"testing"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/api/server/structs"
|
||||
@@ -17,6 +18,8 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/ethereum/go-ethereum/common"
|
||||
"github.com/ethereum/go-ethereum/common/hexutil"
|
||||
log "github.com/sirupsen/logrus"
|
||||
logTest "github.com/sirupsen/logrus/hooks/test"
|
||||
)
|
||||
|
||||
func TestGetDepositContract(t *testing.T) {
|
||||
@@ -689,6 +692,27 @@ func TestGetSpec_BlobSchedule(t *testing.T) {
|
||||
// Check second entry - values should be strings for consistent API output
|
||||
assert.Equal(t, "200", blobSchedule[1]["EPOCH"])
|
||||
assert.Equal(t, "9", blobSchedule[1]["MAX_BLOBS_PER_BLOCK"])
|
||||
|
||||
// Verify that fields with json:"-" are NOT present in the blob schedule entries
|
||||
for i, entry := range blobSchedule {
|
||||
t.Run(fmt.Sprintf("entry_%d_omits_json_dash_fields", i), func(t *testing.T) {
|
||||
// These fields have `json:"-"` in NetworkScheduleEntry and should be omitted
|
||||
_, hasForkVersion := entry["ForkVersion"]
|
||||
assert.Equal(t, false, hasForkVersion, "ForkVersion should be omitted due to json:\"-\"")
|
||||
|
||||
_, hasForkDigest := entry["ForkDigest"]
|
||||
assert.Equal(t, false, hasForkDigest, "ForkDigest should be omitted due to json:\"-\"")
|
||||
|
||||
_, hasBPOEpoch := entry["BPOEpoch"]
|
||||
assert.Equal(t, false, hasBPOEpoch, "BPOEpoch should be omitted due to json:\"-\"")
|
||||
|
||||
_, hasVersionEnum := entry["VersionEnum"]
|
||||
assert.Equal(t, false, hasVersionEnum, "VersionEnum should be omitted due to json:\"-\"")
|
||||
|
||||
_, hasIsFork := entry["isFork"]
|
||||
assert.Equal(t, false, hasIsFork, "isFork should be omitted due to json:\"-\"")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetSpec_BlobSchedule_NotFulu(t *testing.T) {
|
||||
@@ -715,3 +739,35 @@ func TestGetSpec_BlobSchedule_NotFulu(t *testing.T) {
|
||||
_, exists := data["BLOB_SCHEDULE"]
|
||||
require.Equal(t, false, exists)
|
||||
}
|
||||
|
||||
func TestConvertValueForJSON_NoErrorLogsForStrings(t *testing.T) {
|
||||
logHook := logTest.NewLocal(log.StandardLogger())
|
||||
defer logHook.Reset()
|
||||
|
||||
stringTestCases := []struct {
|
||||
tag string
|
||||
value string
|
||||
}{
|
||||
{"CONFIG_NAME", "mainnet"},
|
||||
{"PRESET_BASE", "mainnet"},
|
||||
{"DEPOSIT_CONTRACT_ADDRESS", "0x00000000219ab540356cBB839Cbe05303d7705Fa"},
|
||||
{"TERMINAL_TOTAL_DIFFICULTY", "58750000000000000000000"},
|
||||
}
|
||||
|
||||
for _, tc := range stringTestCases {
|
||||
t.Run(tc.tag, func(t *testing.T) {
|
||||
logHook.Reset()
|
||||
|
||||
// Convert the string value
|
||||
v := reflect.ValueOf(tc.value)
|
||||
result := convertValueForJSON(v, tc.tag)
|
||||
|
||||
// Verify the result is correct
|
||||
require.Equal(t, tc.value, result)
|
||||
|
||||
// Verify NO error was logged about unsupported field kind
|
||||
require.LogsDoNotContain(t, logHook, "Unsupported config field kind")
|
||||
require.LogsDoNotContain(t, logHook, "kind=string")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -269,12 +269,7 @@ func (s *Server) DataColumnSidecars(w http.ResponseWriter, r *http.Request) {
|
||||
}
|
||||
|
||||
blk, err := s.Blocker.Block(ctx, []byte(blockId))
|
||||
if err != nil {
|
||||
httputil.HandleError(w, "Could not fetch block: "+err.Error(), http.StatusInternalServerError)
|
||||
return
|
||||
}
|
||||
if blk == nil {
|
||||
httputil.HandleError(w, "Block not found", http.StatusNotFound)
|
||||
if !shared.WriteBlockFetchError(w, blk, err) {
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@@ -68,7 +68,11 @@ func (rs *BlockRewardService) GetBlockRewardsData(ctx context.Context, blk inter
|
||||
Code: http.StatusInternalServerError,
|
||||
}
|
||||
}
|
||||
exitInfo := validators.ExitInformation(st)
|
||||
var exitInfo *validators.ExitInfo
|
||||
if len(blk.Body().ProposerSlashings()) > 0 || len(blk.Body().AttesterSlashings()) > 0 {
|
||||
// ExitInformation is expensive to compute, only do it if we need it.
|
||||
exitInfo = validators.ExitInformation(st)
|
||||
}
|
||||
st, err = coreblocks.ProcessAttesterSlashings(ctx, st, blk.Body().AttesterSlashings(), exitInfo)
|
||||
if err != nil {
|
||||
return nil, &httputil.DefaultJsonError{
|
||||
|
||||
@@ -29,6 +29,11 @@ func WriteStateFetchError(w http.ResponseWriter, err error) {
|
||||
// WriteBlockFetchError writes an appropriate error based on the supplied argument.
|
||||
// The argument error should be a result of fetching block.
|
||||
func WriteBlockFetchError(w http.ResponseWriter, blk interfaces.ReadOnlySignedBeaconBlock, err error) bool {
|
||||
var blockNotFoundErr *lookup.BlockNotFoundError
|
||||
if errors.As(err, &blockNotFoundErr) {
|
||||
httputil.HandleError(w, "Block not found: "+blockNotFoundErr.Error(), http.StatusNotFound)
|
||||
return false
|
||||
}
|
||||
var invalidBlockIdErr *lookup.BlockIdParseError
|
||||
if errors.As(err, &invalidBlockIdErr) {
|
||||
httputil.HandleError(w, "Invalid block ID: "+invalidBlockIdErr.Error(), http.StatusBadRequest)
|
||||
|
||||
@@ -49,3 +49,59 @@ func TestWriteStateFetchError(t *testing.T) {
|
||||
assert.NoError(t, json.Unmarshal(writer.Body.Bytes(), e), "failed to unmarshal response")
|
||||
}
|
||||
}
|
||||
|
||||
// TestWriteBlockFetchError tests the WriteBlockFetchError function
|
||||
// to ensure that the correct error message and code are written to the response
|
||||
// and that the function returns the correct boolean value.
|
||||
func TestWriteBlockFetchError(t *testing.T) {
|
||||
cases := []struct {
|
||||
name string
|
||||
err error
|
||||
expectedMessage string
|
||||
expectedCode int
|
||||
expectedReturn bool
|
||||
}{
|
||||
{
|
||||
name: "BlockNotFoundError should return 404",
|
||||
err: lookup.NewBlockNotFoundError("block not found at slot 123"),
|
||||
expectedMessage: "Block not found",
|
||||
expectedCode: http.StatusNotFound,
|
||||
expectedReturn: false,
|
||||
},
|
||||
{
|
||||
name: "BlockIdParseError should return 400",
|
||||
err: &lookup.BlockIdParseError{},
|
||||
expectedMessage: "Invalid block ID",
|
||||
expectedCode: http.StatusBadRequest,
|
||||
expectedReturn: false,
|
||||
},
|
||||
{
|
||||
name: "Generic error should return 500",
|
||||
err: errors.New("database connection failed"),
|
||||
expectedMessage: "Could not get block from block ID",
|
||||
expectedCode: http.StatusInternalServerError,
|
||||
expectedReturn: false,
|
||||
},
|
||||
{
|
||||
name: "Nil block should return 404",
|
||||
err: nil,
|
||||
expectedMessage: "Could not find requested block",
|
||||
expectedCode: http.StatusNotFound,
|
||||
expectedReturn: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, c := range cases {
|
||||
t.Run(c.name, func(t *testing.T) {
|
||||
writer := httptest.NewRecorder()
|
||||
result := WriteBlockFetchError(writer, nil, c.err)
|
||||
|
||||
assert.Equal(t, c.expectedReturn, result, "incorrect return value")
|
||||
assert.Equal(t, c.expectedCode, writer.Code, "incorrect status code")
|
||||
assert.StringContains(t, c.expectedMessage, writer.Body.String(), "incorrect error message")
|
||||
|
||||
e := &httputil.DefaultJsonError{}
|
||||
assert.NoError(t, json.Unmarshal(writer.Body.Bytes(), e), "failed to unmarshal response")
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -690,6 +690,10 @@ func (s *Server) ProduceSyncCommitteeContribution(w http.ResponseWriter, r *http
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
if index >= params.BeaconConfig().SyncCommitteeSubnetCount {
|
||||
httputil.HandleError(w, fmt.Sprintf("Subcommittee index needs to be between 0 and %d, %d is outside of this range.", params.BeaconConfig().SyncCommitteeSubnetCount-1, index), http.StatusBadRequest)
|
||||
return
|
||||
}
|
||||
_, slot, ok := shared.UintFromQuery(w, r, "slot", true)
|
||||
if !ok {
|
||||
return
|
||||
|
||||
@@ -2117,6 +2117,27 @@ func TestProduceSyncCommitteeContribution(t *testing.T) {
|
||||
server.ProduceSyncCommitteeContribution(writer, request)
|
||||
assert.Equal(t, http.StatusServiceUnavailable, writer.Code)
|
||||
})
|
||||
t.Run("invalid subcommittee_index", func(t *testing.T) {
|
||||
url := "http://example.com?slot=1&subcommittee_index=10&beacon_block_root=0xcf8e0d4e9587369b2301d0790347320302cc0943d5a1884560367e8208d920f2"
|
||||
request := httptest.NewRequest(http.MethodGet, url, nil)
|
||||
writer := httptest.NewRecorder()
|
||||
writer.Body = &bytes.Buffer{}
|
||||
|
||||
// Use non-optimistic server for this test
|
||||
server := Server{
|
||||
CoreService: &core.Service{
|
||||
HeadFetcher: &mockChain.ChainService{
|
||||
SyncCommitteeIndices: []primitives.CommitteeIndex{0},
|
||||
},
|
||||
},
|
||||
SyncCommitteePool: syncCommitteePool,
|
||||
OptimisticModeFetcher: &mockChain.ChainService{}, // Optimistic: false by default
|
||||
}
|
||||
|
||||
server.ProduceSyncCommitteeContribution(writer, request)
|
||||
assert.Equal(t, http.StatusBadRequest, writer.Code)
|
||||
require.ErrorContains(t, "Subcommittee index needs to be between 0 and 3, 10 is outside of this range.", errors.New(writer.Body.String()))
|
||||
})
|
||||
}
|
||||
|
||||
func TestServer_RegisterValidator(t *testing.T) {
|
||||
|
||||
@@ -17,13 +17,6 @@ import (
|
||||
"google.golang.org/grpc/status"
|
||||
)
|
||||
|
||||
const (
|
||||
// validatorLookupThreshold determines when to use full assignment map vs cached linear search.
|
||||
// For requests with fewer validators, we use cached linear search to avoid the overhead
|
||||
// of building a complete assignment map for all validators in the epoch.
|
||||
validatorLookupThreshold = 3000
|
||||
)
|
||||
|
||||
// GetDutiesV2 returns the duties assigned to a list of validators specified
|
||||
// in the request object.
|
||||
//
|
||||
@@ -60,7 +53,26 @@ func (vs *Server) dutiesv2(ctx context.Context, req *ethpb.DutiesRequest) (*ethp
|
||||
span.SetAttributes(trace.Int64Attribute("num_pubkeys", int64(len(req.PublicKeys))))
|
||||
defer span.End()
|
||||
|
||||
meta, err := loadDutiesMetadata(ctx, s, req.Epoch, len(req.PublicKeys))
|
||||
// Collect validator indices from public keys and cache the lookups
|
||||
type validatorInfo struct {
|
||||
index primitives.ValidatorIndex
|
||||
found bool
|
||||
}
|
||||
validatorLookup := make(map[string]validatorInfo, len(req.PublicKeys))
|
||||
requestIndices := make([]primitives.ValidatorIndex, 0, len(req.PublicKeys))
|
||||
|
||||
for _, pubKey := range req.PublicKeys {
|
||||
key := string(pubKey)
|
||||
if _, exists := validatorLookup[key]; !exists {
|
||||
idx, ok := s.ValidatorIndexByPubkey(bytesutil.ToBytes48(pubKey))
|
||||
validatorLookup[key] = validatorInfo{index: idx, found: ok}
|
||||
if ok {
|
||||
requestIndices = append(requestIndices, idx)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
meta, err := loadDutiesMetadata(ctx, s, req.Epoch, requestIndices)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -68,14 +80,14 @@ func (vs *Server) dutiesv2(ctx context.Context, req *ethpb.DutiesRequest) (*ethp
|
||||
validatorAssignments := make([]*ethpb.DutiesV2Response_Duty, 0, len(req.PublicKeys))
|
||||
nextValidatorAssignments := make([]*ethpb.DutiesV2Response_Duty, 0, len(req.PublicKeys))
|
||||
|
||||
// start loop for assignments for current and next epochs
|
||||
// Build duties using cached validator index lookups
|
||||
for _, pubKey := range req.PublicKeys {
|
||||
if ctx.Err() != nil {
|
||||
return nil, status.Errorf(codes.Aborted, "Could not continue fetching assignments: %v", ctx.Err())
|
||||
}
|
||||
|
||||
validatorIndex, ok := s.ValidatorIndexByPubkey(bytesutil.ToBytes48(pubKey))
|
||||
if !ok {
|
||||
info := validatorLookup[string(pubKey)]
|
||||
if !info.found {
|
||||
unknownDuty := ðpb.DutiesV2Response_Duty{
|
||||
PublicKey: pubKey,
|
||||
Status: ethpb.ValidatorStatus_UNKNOWN_STATUS,
|
||||
@@ -85,16 +97,15 @@ func (vs *Server) dutiesv2(ctx context.Context, req *ethpb.DutiesRequest) (*ethp
|
||||
continue
|
||||
}
|
||||
|
||||
meta.current.liteAssignment = vs.getValidatorAssignment(meta.current, validatorIndex)
|
||||
currentAssignment := vs.getValidatorAssignment(meta.current, info.index)
|
||||
nextAssignment := vs.getValidatorAssignment(meta.next, info.index)
|
||||
|
||||
meta.next.liteAssignment = vs.getValidatorAssignment(meta.next, validatorIndex)
|
||||
|
||||
assignment, nextAssignment, err := vs.buildValidatorDuty(pubKey, validatorIndex, s, req.Epoch, meta)
|
||||
assignment, nextDuty, err := vs.buildValidatorDuty(pubKey, info.index, s, req.Epoch, meta, currentAssignment, nextAssignment)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
validatorAssignments = append(validatorAssignments, assignment)
|
||||
nextValidatorAssignments = append(nextValidatorAssignments, nextAssignment)
|
||||
nextValidatorAssignments = append(nextValidatorAssignments, nextDuty)
|
||||
}
|
||||
|
||||
// Dependent roots for fork choice
|
||||
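The request path above resolves each public key at most once and caches the result before building duties. The same caching shape in a standalone sketch; the lookup function here is a stand-in, not the Prysm API:

package main

import "fmt"

// lookupIndex stands in for a pubkey-to-index lookup that may be asked about
// the same key several times within a single request.
func lookupIndex(pubkey string) (uint64, bool) {
	table := map[string]uint64{"aa": 10, "bb": 20}
	idx, ok := table[pubkey]
	return idx, ok
}

func main() {
	type info struct {
		index uint64
		found bool
	}
	requests := []string{"aa", "bb", "aa", "cc"}

	cache := make(map[string]info, len(requests))
	indices := make([]uint64, 0, len(requests))
	for _, pk := range requests {
		if _, seen := cache[pk]; seen {
			continue // each pubkey is resolved at most once
		}
		idx, ok := lookupIndex(pk)
		cache[pk] = info{index: idx, found: ok}
		if ok {
			indices = append(indices, idx)
		}
	}
	fmt.Println(indices, cache["cc"].found) // [10 20] false
}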
@@ -147,18 +158,15 @@ type dutiesMetadata struct {
|
||||
}
|
||||
|
||||
type metadata struct {
|
||||
committeesAtSlot uint64
|
||||
proposalSlots map[primitives.ValidatorIndex][]primitives.Slot
|
||||
startSlot primitives.Slot
|
||||
committeesBySlot [][][]primitives.ValidatorIndex
|
||||
validatorAssignmentMap map[primitives.ValidatorIndex]*helpers.LiteAssignment
|
||||
liteAssignment *helpers.LiteAssignment
|
||||
committeesAtSlot uint64
|
||||
proposalSlots map[primitives.ValidatorIndex][]primitives.Slot
|
||||
committeeAssignments map[primitives.ValidatorIndex]*helpers.CommitteeAssignment
|
||||
}
|
||||
|
||||
func loadDutiesMetadata(ctx context.Context, s state.BeaconState, reqEpoch primitives.Epoch, numValidators int) (*dutiesMetadata, error) {
|
||||
func loadDutiesMetadata(ctx context.Context, s state.BeaconState, reqEpoch primitives.Epoch, requestIndices []primitives.ValidatorIndex) (*dutiesMetadata, error) {
|
||||
meta := &dutiesMetadata{}
|
||||
var err error
|
||||
meta.current, err = loadMetadata(ctx, s, reqEpoch, numValidators)
|
||||
meta.current, err = loadMetadata(ctx, s, reqEpoch, requestIndices)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -168,14 +176,14 @@ func loadDutiesMetadata(ctx context.Context, s state.BeaconState, reqEpoch primi
|
||||
return nil, status.Errorf(codes.Internal, "Could not compute proposer slots: %v", err)
|
||||
}
|
||||
|
||||
meta.next, err = loadMetadata(ctx, s, reqEpoch+1, numValidators)
|
||||
meta.next, err = loadMetadata(ctx, s, reqEpoch+1, requestIndices)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return meta, nil
|
||||
}
|
||||
|
||||
func loadMetadata(ctx context.Context, s state.BeaconState, reqEpoch primitives.Epoch, numValidators int) (*metadata, error) {
|
||||
func loadMetadata(ctx context.Context, s state.BeaconState, reqEpoch primitives.Epoch, requestIndices []primitives.ValidatorIndex) (*metadata, error) {
|
||||
meta := &metadata{}
|
||||
|
||||
if err := helpers.VerifyAssignmentEpoch(reqEpoch, s); err != nil {
|
||||
@@ -188,56 +196,36 @@ func loadMetadata(ctx context.Context, s state.BeaconState, reqEpoch primitives.
|
||||
}
|
||||
meta.committeesAtSlot = helpers.SlotCommitteeCount(activeValidatorCount)
|
||||
|
||||
meta.startSlot, err = slots.EpochStart(reqEpoch)
|
||||
// Use CommitteeAssignments which only computes committees for requested validators
|
||||
meta.committeeAssignments, err = helpers.CommitteeAssignments(ctx, s, reqEpoch, requestIndices)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
meta.committeesBySlot, err = helpers.PrecomputeCommittees(ctx, s, meta.startSlot)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if numValidators >= validatorLookupThreshold {
|
||||
meta.validatorAssignmentMap = buildValidatorAssignmentMap(meta.committeesBySlot, meta.startSlot)
|
||||
return nil, status.Errorf(codes.Internal, "Could not compute committee assignments: %v", err)
|
||||
}
|
||||
|
||||
return meta, nil
|
||||
}
|
||||
|
||||
// buildValidatorAssignmentMap creates a map from validator index to assignment for O(1) lookup.
|
||||
func buildValidatorAssignmentMap(
|
||||
bySlot [][][]primitives.ValidatorIndex,
|
||||
startSlot primitives.Slot,
|
||||
) map[primitives.ValidatorIndex]*helpers.LiteAssignment {
|
||||
validatorToAssignment := make(map[primitives.ValidatorIndex]*helpers.LiteAssignment)
|
||||
|
||||
for relativeSlot, committees := range bySlot {
|
||||
for cIdx, committee := range committees {
|
||||
for pos, vIdx := range committee {
|
||||
validatorToAssignment[vIdx] = &helpers.LiteAssignment{
|
||||
AttesterSlot: startSlot + primitives.Slot(relativeSlot),
|
||||
CommitteeIndex: primitives.CommitteeIndex(cIdx),
|
||||
CommitteeLength: uint64(len(committee)),
|
||||
ValidatorCommitteeIndex: uint64(pos),
|
||||
}
|
||||
}
|
||||
// findValidatorIndexInCommittee finds the position of a validator in a committee.
|
||||
func findValidatorIndexInCommittee(committee []primitives.ValidatorIndex, validatorIndex primitives.ValidatorIndex) uint64 {
|
||||
for i, vIdx := range committee {
|
||||
if vIdx == validatorIndex {
|
||||
return uint64(i)
|
||||
}
|
||||
}
|
||||
return validatorToAssignment
|
||||
return 0
|
||||
}
|
||||
|
||||
// getValidatorAssignment retrieves the assignment for a validator using either
|
||||
// the pre-built assignment map (for large requests) or linear search (for small requests).
|
||||
// getValidatorAssignment retrieves the assignment for a validator from CommitteeAssignments.
|
||||
func (vs *Server) getValidatorAssignment(meta *metadata, validatorIndex primitives.ValidatorIndex) *helpers.LiteAssignment {
|
||||
if meta.validatorAssignmentMap != nil {
|
||||
if assignment, exists := meta.validatorAssignmentMap[validatorIndex]; exists {
|
||||
return assignment
|
||||
if assignment, exists := meta.committeeAssignments[validatorIndex]; exists {
|
||||
return &helpers.LiteAssignment{
|
||||
AttesterSlot: assignment.AttesterSlot,
|
||||
CommitteeIndex: assignment.CommitteeIndex,
|
||||
CommitteeLength: uint64(len(assignment.Committee)),
|
||||
ValidatorCommitteeIndex: findValidatorIndexInCommittee(assignment.Committee, validatorIndex),
|
||||
}
|
||||
return &helpers.LiteAssignment{}
|
||||
}
|
||||
|
||||
return helpers.AssignmentForValidator(meta.committeesBySlot, meta.startSlot, validatorIndex)
|
||||
return &helpers.LiteAssignment{}
|
||||
}
|
||||
|
||||
// buildValidatorDuty builds both current‑epoch and next‑epoch V2 duty objects
|
||||
@@ -248,21 +236,23 @@ func (vs *Server) buildValidatorDuty(
|
||||
s state.BeaconState,
|
||||
reqEpoch primitives.Epoch,
|
||||
meta *dutiesMetadata,
|
||||
currentAssignment *helpers.LiteAssignment,
|
||||
nextAssignment *helpers.LiteAssignment,
|
||||
) (*ethpb.DutiesV2Response_Duty, *ethpb.DutiesV2Response_Duty, error) {
|
||||
assignment := ðpb.DutiesV2Response_Duty{PublicKey: pubKey}
|
||||
nextAssignment := ðpb.DutiesV2Response_Duty{PublicKey: pubKey}
|
||||
nextDuty := ðpb.DutiesV2Response_Duty{PublicKey: pubKey}
|
||||
|
||||
statusEnum := assignmentStatus(s, idx)
|
||||
assignment.ValidatorIndex = idx
|
||||
assignment.Status = statusEnum
|
||||
assignment.CommitteesAtSlot = meta.current.committeesAtSlot
|
||||
assignment.ProposerSlots = meta.current.proposalSlots[idx]
|
||||
populateCommitteeFields(assignment, meta.current.liteAssignment)
|
||||
populateCommitteeFields(assignment, currentAssignment)
|
||||
|
||||
nextAssignment.ValidatorIndex = idx
|
||||
nextAssignment.Status = statusEnum
|
||||
nextAssignment.CommitteesAtSlot = meta.next.committeesAtSlot
|
||||
populateCommitteeFields(nextAssignment, meta.next.liteAssignment)
|
||||
nextDuty.ValidatorIndex = idx
|
||||
nextDuty.Status = statusEnum
|
||||
nextDuty.CommitteesAtSlot = meta.next.committeesAtSlot
|
||||
populateCommitteeFields(nextDuty, nextAssignment)
|
||||
|
||||
// Sync committee flags
|
||||
if coreTime.HigherEqualThanAltairVersionAndEpoch(s, reqEpoch) {
|
||||
@@ -271,7 +261,7 @@ func (vs *Server) buildValidatorDuty(
|
||||
return nil, nil, status.Errorf(codes.Internal, "Could not determine current epoch sync committee: %v", err)
|
||||
}
|
||||
assignment.IsSyncCommittee = inSync
|
||||
nextAssignment.IsSyncCommittee = inSync
|
||||
nextDuty.IsSyncCommittee = inSync
|
||||
if inSync {
|
||||
if err := core.RegisterSyncSubnetCurrentPeriodProto(s, reqEpoch, pubKey, statusEnum); err != nil {
|
||||
return nil, nil, status.Errorf(codes.Internal, "Could not register sync subnet current period: %v", err)
|
||||
@@ -290,18 +280,16 @@ func (vs *Server) buildValidatorDuty(
|
||||
if err != nil {
|
||||
return nil, nil, status.Errorf(codes.Internal, "Could not determine next epoch sync committee: %v", err)
|
||||
}
|
||||
nextAssignment.IsSyncCommittee = nextInSync
|
||||
nextDuty.IsSyncCommittee = nextInSync
|
||||
if nextInSync {
|
||||
go func() {
|
||||
if err := core.RegisterSyncSubnetNextPeriodProto(s, reqEpoch, pubKey, statusEnum); err != nil {
|
||||
log.WithError(err).Warn("Could not register sync subnet next period")
|
||||
}
|
||||
}()
|
||||
if err := core.RegisterSyncSubnetNextPeriodProto(s, reqEpoch, pubKey, statusEnum); err != nil {
|
||||
log.WithError(err).Warn("Could not register sync subnet next period")
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return assignment, nextAssignment, nil
|
||||
return assignment, nextDuty, nil
|
||||
}
|
||||
|
||||
func populateCommitteeFields(duty *ethpb.DutiesV2Response_Duty, la *helpers.LiteAssignment) {
|
||||
|
||||
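populateCommitteeFields is cut off by the hunk boundary above. Based on the LiteAssignment fields this change passes into it, one plausible sketch is the following; the duty-side field names are guesses and the real function may differ:

```go
package example

// Stand-ins for the generated duty proto and helpers.LiteAssignment; only the
// LiteAssignment side is taken from the diff, the duty field names are hypothetical.
type liteAssignment struct {
	AttesterSlot            uint64
	CommitteeIndex          uint64
	CommitteeLength         uint64
	ValidatorCommitteeIndex uint64
}

type duty struct {
	AttesterSlot              uint64
	CommitteeIndex            uint64
	CommitteeLength           uint64
	ValidatorIndexInCommittee uint64
}

// populateCommitteeFields (sketch): copy the committee-related fields from the lite
// assignment onto the duty, leaving the duty untouched when there is no assignment.
func populateCommitteeFields(d *duty, la *liteAssignment) {
	if la == nil {
		return
	}
	d.AttesterSlot = la.AttesterSlot
	d.CommitteeIndex = la.CommitteeIndex
	d.CommitteeLength = la.CommitteeLength
	d.ValidatorIndexInCommittee = la.ValidatorCommitteeIndex
}
```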
@@ -560,105 +560,20 @@ func TestGetDutiesV2_SyncNotReady(t *testing.T) {
|
||||
assert.ErrorContains(t, "Syncing to latest head", err)
|
||||
}
|
||||
|
||||
func TestBuildValidatorAssignmentMap(t *testing.T) {
|
||||
start := primitives.Slot(200)
|
||||
bySlot := [][][]primitives.ValidatorIndex{
|
||||
{{1, 2, 3}}, // slot 200, committee 0
|
||||
{{7, 8, 9}}, // slot 201, committee 0
|
||||
{{4, 5}, {10, 11}}, // slot 202, committee 0 & 1
|
||||
}
|
||||
|
||||
assignmentMap := buildValidatorAssignmentMap(bySlot, start)
|
||||
|
||||
// Test validator 8 assignment (slot 201, committee 0, position 1)
|
||||
vIdx := primitives.ValidatorIndex(8)
|
||||
got, exists := assignmentMap[vIdx]
|
||||
assert.Equal(t, true, exists)
|
||||
require.NotNil(t, got)
|
||||
assert.Equal(t, start+1, got.AttesterSlot)
|
||||
assert.Equal(t, primitives.CommitteeIndex(0), got.CommitteeIndex)
|
||||
assert.Equal(t, uint64(3), got.CommitteeLength)
|
||||
assert.Equal(t, uint64(1), got.ValidatorCommitteeIndex)
|
||||
|
||||
// Test validator 1 assignment (slot 200, committee 0, position 0)
|
||||
vIdx1 := primitives.ValidatorIndex(1)
|
||||
got1, exists1 := assignmentMap[vIdx1]
|
||||
assert.Equal(t, true, exists1)
|
||||
require.NotNil(t, got1)
|
||||
assert.Equal(t, start, got1.AttesterSlot)
|
||||
assert.Equal(t, primitives.CommitteeIndex(0), got1.CommitteeIndex)
|
||||
assert.Equal(t, uint64(3), got1.CommitteeLength)
|
||||
assert.Equal(t, uint64(0), got1.ValidatorCommitteeIndex)
|
||||
|
||||
// Test validator 10 assignment (slot 202, committee 1, position 0)
|
||||
vIdx10 := primitives.ValidatorIndex(10)
|
||||
got10, exists10 := assignmentMap[vIdx10]
|
||||
assert.Equal(t, true, exists10)
|
||||
require.NotNil(t, got10)
|
||||
assert.Equal(t, start+2, got10.AttesterSlot)
|
||||
assert.Equal(t, primitives.CommitteeIndex(1), got10.CommitteeIndex)
|
||||
assert.Equal(t, uint64(2), got10.CommitteeLength)
|
||||
assert.Equal(t, uint64(0), got10.ValidatorCommitteeIndex)
|
||||
|
||||
// Test non-existent validator
|
||||
_, exists99 := assignmentMap[primitives.ValidatorIndex(99)]
|
||||
assert.Equal(t, false, exists99)
|
||||
|
||||
// Verify that we get the same results as the linear search
|
||||
for _, committees := range bySlot {
|
||||
for _, committee := range committees {
|
||||
for _, validatorIdx := range committee {
|
||||
linearResult := helpers.AssignmentForValidator(bySlot, start, validatorIdx)
|
||||
mapResult, mapExists := assignmentMap[validatorIdx]
|
||||
assert.Equal(t, true, mapExists)
|
||||
require.DeepEqual(t, linearResult, mapResult)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
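The test above pins down what buildValidatorAssignmentMap is expected to produce: for every validator in committeesBySlot, its attester slot (startSlot plus offset), committee index, committee length, and position inside the committee. A minimal sketch consistent with those expectations, with simplified stand-in types:

```go
package example

type ValidatorIndex uint64

type LiteAssignment struct {
	AttesterSlot            uint64
	CommitteeIndex          uint64
	CommitteeLength         uint64
	ValidatorCommitteeIndex uint64
}

// buildValidatorAssignmentMap indexes every validator in committeesBySlot so lookups are O(1).
// committeesBySlot[i][j] is committee j at slot startSlot+i.
func buildValidatorAssignmentMap(committeesBySlot [][][]ValidatorIndex, startSlot uint64) map[ValidatorIndex]*LiteAssignment {
	out := make(map[ValidatorIndex]*LiteAssignment)
	for slotOffset, committees := range committeesBySlot {
		for committeeIdx, committee := range committees {
			for pos, v := range committee {
				out[v] = &LiteAssignment{
					AttesterSlot:            startSlot + uint64(slotOffset),
					CommitteeIndex:          uint64(committeeIdx),
					CommitteeLength:         uint64(len(committee)),
					ValidatorCommitteeIndex: uint64(pos),
				}
			}
		}
	}
	return out
}
```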
func TestGetValidatorAssignment_WithAssignmentMap(t *testing.T) {
|
||||
func TestGetValidatorAssignment(t *testing.T) {
|
||||
start := primitives.Slot(100)
|
||||
bySlot := [][][]primitives.ValidatorIndex{
|
||||
{{1, 2, 3}},
|
||||
{{4, 5, 6}},
|
||||
|
||||
// Test using CommitteeAssignments
|
||||
committeeAssignments := map[primitives.ValidatorIndex]*helpers.CommitteeAssignment{
|
||||
5: {
|
||||
Committee: []primitives.ValidatorIndex{4, 5, 6},
|
||||
AttesterSlot: start + 1,
|
||||
CommitteeIndex: primitives.CommitteeIndex(0),
|
||||
},
|
||||
}
|
||||
|
||||
// Test with pre-built assignment map (large request scenario)
|
||||
meta := &metadata{
|
||||
startSlot: start,
|
||||
committeesBySlot: bySlot,
|
||||
validatorAssignmentMap: buildValidatorAssignmentMap(bySlot, start),
|
||||
}
|
||||
|
||||
vs := &Server{}
|
||||
|
||||
// Test existing validator (validator 2 is at position 1 in the committee, not position 2)
|
||||
assignment := vs.getValidatorAssignment(meta, primitives.ValidatorIndex(2))
|
||||
require.NotNil(t, assignment)
|
||||
assert.Equal(t, start, assignment.AttesterSlot)
|
||||
assert.Equal(t, primitives.CommitteeIndex(0), assignment.CommitteeIndex)
|
||||
assert.Equal(t, uint64(1), assignment.ValidatorCommitteeIndex)
|
||||
|
||||
// Test non-existent validator should return empty assignment
|
||||
assignment = vs.getValidatorAssignment(meta, primitives.ValidatorIndex(99))
|
||||
require.NotNil(t, assignment)
|
||||
assert.Equal(t, primitives.Slot(0), assignment.AttesterSlot)
|
||||
assert.Equal(t, primitives.CommitteeIndex(0), assignment.CommitteeIndex)
|
||||
}
|
||||
|
||||
func TestGetValidatorAssignment_WithoutAssignmentMap(t *testing.T) {
|
||||
start := primitives.Slot(100)
|
||||
bySlot := [][][]primitives.ValidatorIndex{
|
||||
{{1, 2, 3}},
|
||||
{{4, 5, 6}},
|
||||
}
|
||||
|
||||
// Test without assignment map (small request scenario)
|
||||
meta := &metadata{
|
||||
startSlot: start,
|
||||
committeesBySlot: bySlot,
|
||||
validatorAssignmentMap: nil, // No map - should use linear search
|
||||
committeeAssignments: committeeAssignments,
|
||||
}
|
||||
|
||||
vs := &Server{}
|
||||
@@ -676,53 +591,3 @@ func TestGetValidatorAssignment_WithoutAssignmentMap(t *testing.T) {
|
||||
assert.Equal(t, primitives.Slot(0), assignment.AttesterSlot)
|
||||
assert.Equal(t, primitives.CommitteeIndex(0), assignment.CommitteeIndex)
|
||||
}
|
||||
|
||||
func TestLoadMetadata_ThresholdBehavior(t *testing.T) {
|
||||
state, _ := util.DeterministicGenesisState(t, 128)
|
||||
epoch := primitives.Epoch(0)
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
numValidators int
|
||||
expectAssignmentMap bool
|
||||
}{
|
||||
{
|
||||
name: "Small request - below threshold",
|
||||
numValidators: 100,
|
||||
expectAssignmentMap: false,
|
||||
},
|
||||
{
|
||||
name: "Large request - at threshold",
|
||||
numValidators: validatorLookupThreshold,
|
||||
expectAssignmentMap: true,
|
||||
},
|
||||
{
|
||||
name: "Large request - above threshold",
|
||||
numValidators: validatorLookupThreshold + 1000,
|
||||
expectAssignmentMap: true,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
meta, err := loadMetadata(t.Context(), state, epoch, tt.numValidators)
|
||||
require.NoError(t, err)
|
||||
require.NotNil(t, meta)
|
||||
|
||||
if tt.expectAssignmentMap {
|
||||
require.NotNil(t, meta.validatorAssignmentMap, "Expected assignment map to be built for large requests")
|
||||
assert.Equal(t, true, len(meta.validatorAssignmentMap) > 0, "Assignment map should not be empty")
|
||||
} else {
|
||||
// For small requests, the map should be nil (not initialized)
|
||||
if meta.validatorAssignmentMap != nil {
|
||||
t.Errorf("Expected no assignment map for small requests, got: %v", meta.validatorAssignmentMap)
|
||||
}
|
||||
}
|
||||
|
||||
// Common fields should always be set
|
||||
assert.Equal(t, true, meta.committeesAtSlot > 0)
|
||||
require.NotNil(t, meta.committeesBySlot)
|
||||
assert.Equal(t, true, len(meta.committeesBySlot) > 0)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
@@ -352,7 +352,7 @@ func (vs *Server) broadcastAndReceiveSidecars(
dataColumnSidecars []blocks.RODataColumn,
) error {
if block.Version() >= version.Fulu {
if err := vs.broadcastAndReceiveDataColumns(ctx, dataColumnSidecars, root); err != nil {
if err := vs.broadcastAndReceiveDataColumns(ctx, dataColumnSidecars); err != nil {
return errors.Wrap(err, "broadcast and receive data columns")
}
return nil
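The hunk above keeps the existing version gate: blocks at Fulu or later carry data column sidecars, earlier blocks carry blob sidecars; only the data-column call loses its root argument. The gate in isolation (version constants and the server type are illustrative stand-ins):

```go
package example

import "context"

const (
	versionDeneb = 4 // illustrative ordering only
	versionFulu  = 6
)

type server struct {
	broadcastDataColumns func(context.Context) error
	broadcastBlobs       func(context.Context) error
}

// broadcastSidecars mirrors the version gate above: Fulu and later blocks carry
// data column sidecars, earlier forks carry blob sidecars.
func (s *server) broadcastSidecars(ctx context.Context, blockVersion int) error {
	if blockVersion >= versionFulu {
		return s.broadcastDataColumns(ctx)
	}
	return s.broadcastBlobs(ctx)
}
```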
@@ -495,43 +495,22 @@ func (vs *Server) broadcastAndReceiveBlobs(ctx context.Context, sidecars []*ethp
}

// broadcastAndReceiveDataColumns handles the broadcasting and reception of data columns sidecars.
func (vs *Server) broadcastAndReceiveDataColumns(
ctx context.Context,
roSidecars []blocks.RODataColumn,
root [fieldparams.RootLength]byte,
) error {
verifiedRODataColumns := make([]blocks.VerifiedRODataColumn, 0, len(roSidecars))
eg, _ := errgroup.WithContext(ctx)
for _, roSidecar := range roSidecars {
// We build this block ourselves, so we can upgrade the read only data column sidecar into a verified one.
verifiedRODataColumn := blocks.NewVerifiedRODataColumn(roSidecar)
verifiedRODataColumns = append(verifiedRODataColumns, verifiedRODataColumn)

eg.Go(func() error {
// Compute the subnet index based on the column index.
subnet := peerdas.ComputeSubnetForDataColumnSidecar(roSidecar.Index)

if err := vs.P2P.BroadcastDataColumnSidecar(subnet, verifiedRODataColumn); err != nil {
return errors.Wrap(err, "broadcast data column")
}

return nil
})
func (vs *Server) broadcastAndReceiveDataColumns(ctx context.Context, roSidecars []blocks.RODataColumn) error {
// We built this block ourselves, so we can upgrade the read only data column sidecar into a verified one.
verifiedSidecars := make([]blocks.VerifiedRODataColumn, 0, len(roSidecars))
for _, sidecar := range roSidecars {
verifiedSidecar := blocks.NewVerifiedRODataColumn(sidecar)
verifiedSidecars = append(verifiedSidecars, verifiedSidecar)
}

if err := vs.DataColumnReceiver.ReceiveDataColumns(verifiedRODataColumns); err != nil {
return errors.Wrap(err, "receive data column")
// Broadcast sidecars (non blocking).
if err := vs.P2P.BroadcastDataColumnSidecars(ctx, verifiedSidecars); err != nil {
return errors.Wrap(err, "broadcast data column sidecars")
}

for _, verifiedRODataColumn := range verifiedRODataColumns {
vs.OperationNotifier.OperationFeed().Send(&feed.Event{
Type: operation.DataColumnSidecarReceived,
Data: &operation.DataColumnSidecarReceivedData{DataColumn: &verifiedRODataColumn}, // #nosec G601
})
}

if err := eg.Wait(); err != nil {
return errors.Wrap(err, "wait for data columns to be broadcasted")
// In parallel, receive sidecars.
if err := vs.DataColumnReceiver.ReceiveDataColumns(verifiedSidecars); err != nil {
return errors.Wrap(err, "receive data columns")
}

return nil

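The rewrite above drops the per-sidecar errgroup fan-out (one BroadcastDataColumnSidecar call per subnet) in favour of a single batched BroadcastDataColumnSidecars call followed by ReceiveDataColumns. A condensed sketch of the new flow, with small interfaces standing in for Prysm's P2P and receiver types:

```go
package example

import (
	"context"
	"fmt"
)

// Stand-ins for blocks.RODataColumn / blocks.VerifiedRODataColumn and the P2P/receiver dependencies.
type roDataColumn struct{ Index uint64 }
type verifiedDataColumn struct{ roDataColumn }

type broadcaster interface {
	BroadcastDataColumnSidecars(ctx context.Context, sidecars []verifiedDataColumn) error
}

type receiver interface {
	ReceiveDataColumns(sidecars []verifiedDataColumn) error
}

// broadcastAndReceive mirrors the refactored flow: upgrade, batch-broadcast, then hand off to the receiver.
func broadcastAndReceive(ctx context.Context, p2p broadcaster, rcv receiver, sidecars []roDataColumn) error {
	verified := make([]verifiedDataColumn, 0, len(sidecars))
	for _, sc := range sidecars {
		// We produced these columns ourselves, so promoting them to "verified" is safe by construction.
		verified = append(verified, verifiedDataColumn{sc})
	}
	if err := p2p.BroadcastDataColumnSidecars(ctx, verified); err != nil {
		return fmt.Errorf("broadcast data column sidecars: %w", err)
	}
	return rcv.ReceiveDataColumns(verified)
}
```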
@@ -12,13 +12,18 @@ import (

func (vs *Server) getSlashings(ctx context.Context, head state.BeaconState) ([]*ethpb.ProposerSlashing, []ethpb.AttSlashing) {
var err error

proposerSlashings := vs.SlashingsPool.PendingProposerSlashings(ctx, head, false /*noLimit*/)
attSlashings := vs.SlashingsPool.PendingAttesterSlashings(ctx, head, false /*noLimit*/)
validProposerSlashings := make([]*ethpb.ProposerSlashing, 0, len(proposerSlashings))
validAttSlashings := make([]ethpb.AttSlashing, 0, len(attSlashings))
if len(proposerSlashings) == 0 && len(attSlashings) == 0 {
return validProposerSlashings, validAttSlashings
}
// ExitInformation is expensive to compute, only do it if we need it.
exitInfo := v.ExitInformation(head)
if err := helpers.UpdateTotalActiveBalanceCache(head, exitInfo.TotalActiveBalance); err != nil {
log.WithError(err).Warn("Could not update total active balance cache")
}
proposerSlashings := vs.SlashingsPool.PendingProposerSlashings(ctx, head, false /*noLimit*/)
validProposerSlashings := make([]*ethpb.ProposerSlashing, 0, len(proposerSlashings))
for _, slashing := range proposerSlashings {
_, err = blocks.ProcessProposerSlashing(ctx, head, slashing, exitInfo)
if err != nil {
@@ -27,8 +32,6 @@ func (vs *Server) getSlashings(ctx context.Context, head state.BeaconState) ([]*
}
validProposerSlashings = append(validProposerSlashings, slashing)
}
attSlashings := vs.SlashingsPool.PendingAttesterSlashings(ctx, head, false /*noLimit*/)
validAttSlashings := make([]ethpb.AttSlashing, 0, len(attSlashings))
for _, slashing := range attSlashings {
_, err = blocks.ProcessAttesterSlashing(ctx, head, slashing, exitInfo)
if err != nil {

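The reordering above is a lazy-evaluation fix: both pools are read first, and the expensive v.ExitInformation computation (plus the balance-cache update) only runs when at least one slashing needs validating. The guard pattern in isolation, with placeholder types:

```go
package example

// expensiveInfo stands in for v.ExitInformation, which is costly to compute.
func expensiveInfo() int { return 42 }

// filterValid returns early, before any expensive work, when there is nothing to validate.
func filterValid(proposerSlashings, attSlashings []string) (ps, as []string) {
	ps = make([]string, 0, len(proposerSlashings))
	as = make([]string, 0, len(attSlashings))
	if len(proposerSlashings) == 0 && len(attSlashings) == 0 {
		return ps, as
	}
	info := expensiveInfo() // only computed when at least one slashing is pending
	_ = info
	// ... validate each slashing against info and append the valid ones ...
	return ps, as
}
```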
@@ -45,7 +45,6 @@ go_library(
|
||||
"subscriber_bls_to_execution_change.go",
|
||||
"subscriber_data_column_sidecar.go",
|
||||
"subscriber_handlers.go",
|
||||
"subscriber_light_client.go",
|
||||
"subscriber_sync_committee_message.go",
|
||||
"subscriber_sync_contribution_proof.go",
|
||||
"subscription_topic_handler.go",
|
||||
@@ -113,7 +112,6 @@ go_library(
|
||||
"//config/params:go_default_library",
|
||||
"//consensus-types/blocks:go_default_library",
|
||||
"//consensus-types/interfaces:go_default_library",
|
||||
"//consensus-types/light-client:go_default_library",
|
||||
"//consensus-types/primitives:go_default_library",
|
||||
"//consensus-types/wrapper:go_default_library",
|
||||
"//container/leaky-bucket:go_default_library",
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
prysmP2P "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||
@@ -187,7 +188,7 @@ func requestSidecarsFromStorage(
|
||||
requestedIndicesMap map[uint64]bool,
|
||||
roots map[[fieldparams.RootLength]byte]bool,
|
||||
) (map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn, error) {
|
||||
requestedIndices := sortedSliceFromMap(requestedIndicesMap)
|
||||
requestedIndices := helpers.SortedSliceFromMap(requestedIndicesMap)
|
||||
|
||||
result := make(map[[fieldparams.RootLength]byte][]blocks.VerifiedRODataColumn, len(roots))
|
||||
|
||||
@@ -599,7 +600,7 @@ func assembleAvailableSidecarsForRoot(
|
||||
root [fieldparams.RootLength]byte,
|
||||
indices map[uint64]bool,
|
||||
) ([]blocks.VerifiedRODataColumn, error) {
|
||||
stored, err := storage.Get(root, sortedSliceFromMap(indices))
|
||||
stored, err := storage.Get(root, helpers.SortedSliceFromMap(indices))
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "storage get for root %#x", root)
|
||||
}
|
||||
@@ -802,25 +803,27 @@ func sendDataColumnSidecarsRequest(
roDataColumns = append(roDataColumns, localRoDataColumns...)
}

prettyByRangeRequests := make([]map[string]any, 0, len(byRangeRequests))
for _, request := range byRangeRequests {
prettyRequest := map[string]any{
"startSlot": request.StartSlot,
"count": request.Count,
"columns": request.Columns,
if logrus.GetLevel() >= logrus.DebugLevel {
prettyByRangeRequests := make([]map[string]any, 0, len(byRangeRequests))
for _, request := range byRangeRequests {
prettyRequest := map[string]any{
"startSlot": request.StartSlot,
"count": request.Count,
"columns": helpers.PrettySlice(request.Columns),
}

prettyByRangeRequests = append(prettyByRangeRequests, prettyRequest)
}

prettyByRangeRequests = append(prettyByRangeRequests, prettyRequest)
log.WithFields(logrus.Fields{
"respondedSidecars": len(roDataColumns),
"requestCount": len(byRangeRequests),
"type": "byRange",
"duration": time.Since(start),
"requests": prettyByRangeRequests,
}).Debug("Received data column sidecars")
}

log.WithFields(logrus.Fields{
"respondedSidecars": len(roDataColumns),
"requestCount": len(byRangeRequests),
"type": "byRange",
"duration": time.Since(start),
"requests": prettyByRangeRequests,
}).Debug("Received data column sidecars")

return roDataColumns, nil
}

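The logging change above wraps both the construction of prettyByRangeRequests and the Debug call in a logrus.GetLevel() >= logrus.DebugLevel check, so the pretty-printing work is skipped entirely when debug logging is off. The same pattern as a standalone function (the request type is a stand-in; the logrus calls match those used in the diff):

```go
package example

import (
	"time"

	"github.com/sirupsen/logrus"
)

type byRangeRequest struct {
	StartSlot uint64
	Count     uint64
	Columns   []uint64
}

var log = logrus.WithField("prefix", "example")

// logRequests only pays the formatting cost when debug logging is actually enabled.
func logRequests(requests []byRangeRequest, responded int, start time.Time) {
	if logrus.GetLevel() < logrus.DebugLevel {
		return
	}
	pretty := make([]map[string]any, 0, len(requests))
	for _, r := range requests {
		pretty = append(pretty, map[string]any{
			"startSlot": r.StartSlot,
			"count":     r.Count,
			"columns":   r.Columns,
		})
	}
	log.WithFields(logrus.Fields{
		"respondedSidecars": responded,
		"requestCount":      len(requests),
		"duration":          time.Since(start),
		"requests":          pretty,
	}).Debug("Received data column sidecars")
}
```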
@@ -895,7 +898,7 @@ func buildByRangeRequests(
|
||||
}
|
||||
}
|
||||
|
||||
columns := sortedSliceFromMap(reference)
|
||||
columns := helpers.SortedSliceFromMap(reference)
|
||||
startSlot, endSlot := slots[0], slots[len(slots)-1]
|
||||
totalCount := uint64(endSlot - startSlot + 1)
|
||||
|
||||
@@ -920,7 +923,7 @@ func buildByRootRequest(indicesByRoot map[[fieldparams.RootLength]byte]map[uint6
|
||||
for root, indices := range indicesByRoot {
|
||||
identifier := ð.DataColumnsByRootIdentifier{
|
||||
BlockRoot: root[:],
|
||||
Columns: sortedSliceFromMap(indices),
|
||||
Columns: helpers.SortedSliceFromMap(indices),
|
||||
}
|
||||
identifiers = append(identifiers, identifier)
|
||||
}
|
||||
@@ -1173,17 +1176,6 @@ func compareIndices(left, right map[uint64]bool) bool {
return true
}

// sortedSliceFromMap converts a map[uint64]bool to a sorted slice of keys.
func sortedSliceFromMap(m map[uint64]bool) []uint64 {
result := make([]uint64, 0, len(m))
for k := range m {
result = append(result, k)
}

slices.Sort(result)
return result
}

// computeTotalCount calculates the total count of indices across all roots.
func computeTotalCount(input map[[fieldparams.RootLength]byte]map[uint64]bool) int {
totalCount := 0

@@ -1007,13 +1007,6 @@ func TestCompareIndices(t *testing.T) {
require.Equal(t, true, compareIndices(left, right))
}

func TestSlortedSliceFromMap(t *testing.T) {
input := map[uint64]bool{54: true, 23: true, 35: true}
expected := []uint64{23, 35, 54}
actual := sortedSliceFromMap(input)
require.DeepEqual(t, expected, actual)
}
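This hunk deletes the package-local sortedSliceFromMap and its test in favour of the shared helpers.SortedSliceFromMap. Presumably the shared helper keeps the shape of the local copies it replaces; as a standalone function that is simply:

```go
package example

import "slices"

// SortedSliceFromMap converts a map[uint64]bool to a sorted slice of its keys.
// Presumed to match the local copies this change removes.
func SortedSliceFromMap(m map[uint64]bool) []uint64 {
	result := make([]uint64, 0, len(m))
	for k := range m {
		result = append(result, k)
	}
	slices.Sort(result)
	return result
}
```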
|
||||
func TestComputeTotalCount(t *testing.T) {
|
||||
input := map[[fieldparams.RootLength]byte]map[uint64]bool{
|
||||
[fieldparams.RootLength]byte{1}: {1: true, 3: true},
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
fieldparams "github.com/OffchainLabs/prysm/v6/config/fieldparams"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
@@ -95,7 +96,7 @@ func (s *Service) processDataColumnSidecarsFromReconstruction(ctx context.Contex
|
||||
"slot": slot,
|
||||
"proposerIndex": proposerIndex,
|
||||
"count": len(unseenIndices),
|
||||
"indices": sortedSliceFromMap(unseenIndices),
|
||||
"indices": helpers.SortedPrettySliceFromMap(unseenIndices),
|
||||
"duration": duration,
|
||||
}).Debug("Reconstructed data column sidecars")
|
||||
}
|
||||
|
||||
@@ -20,6 +20,7 @@ go_library(
|
||||
"//beacon-chain/blockchain:go_default_library",
|
||||
"//beacon-chain/core/feed/block:go_default_library",
|
||||
"//beacon-chain/core/feed/state:go_default_library",
|
||||
"//beacon-chain/core/helpers:go_default_library",
|
||||
"//beacon-chain/core/peerdas:go_default_library",
|
||||
"//beacon-chain/core/transition:go_default_library",
|
||||
"//beacon-chain/das:go_default_library",
|
||||
|
||||
@@ -3,12 +3,12 @@ package initialsync
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
@@ -430,9 +430,9 @@ func (f *blocksFetcher) fetchSidecars(ctx context.Context, pid peer.ID, peers []
|
||||
}
|
||||
|
||||
if len(missingIndicesByRoot) > 0 {
|
||||
prettyMissingIndicesByRoot := make(map[string][]uint64, len(missingIndicesByRoot))
|
||||
prettyMissingIndicesByRoot := make(map[string]string, len(missingIndicesByRoot))
|
||||
for root, indices := range missingIndicesByRoot {
|
||||
prettyMissingIndicesByRoot[fmt.Sprintf("%#x", root)] = sortedSliceFromMap(indices)
|
||||
prettyMissingIndicesByRoot[fmt.Sprintf("%#x", root)] = helpers.SortedPrettySliceFromMap(indices)
|
||||
}
|
||||
return "", errors.Errorf("some sidecars are still missing after fetch: %v", prettyMissingIndicesByRoot)
|
||||
}
|
||||
@@ -727,17 +727,6 @@ func (f *blocksFetcher) fetchBlobsFromPeer(ctx context.Context, bwb []blocks.Blo
|
||||
return "", errNoPeersAvailable
|
||||
}
|
||||
|
||||
// sortedSliceFromMap returns a sorted slice of keys from a map.
|
||||
func sortedSliceFromMap(m map[uint64]bool) []uint64 {
|
||||
result := make([]uint64, 0, len(m))
|
||||
for k := range m {
|
||||
result = append(result, k)
|
||||
}
|
||||
|
||||
slices.Sort(result)
|
||||
return result
|
||||
}
|
||||
|
||||
// requestBlocks is a wrapper for handling BeaconBlocksByRangeRequest requests/streams.
|
||||
func (f *blocksFetcher) requestBlocks(
|
||||
ctx context.Context,
|
||||
|
||||
@@ -1349,14 +1349,6 @@ func TestBlockFetcher_HasSufficientBandwidth(t *testing.T) {
|
||||
assert.Equal(t, 2, len(receivedPeers))
|
||||
}
|
||||
|
||||
func TestSortedSliceFromMap(t *testing.T) {
|
||||
m := map[uint64]bool{1: true, 3: true, 2: true, 4: true}
|
||||
expected := []uint64{1, 2, 3, 4}
|
||||
|
||||
actual := sortedSliceFromMap(m)
|
||||
require.DeepSSZEqual(t, expected, actual)
|
||||
}
|
||||
|
||||
func TestFetchSidecars(t *testing.T) {
|
||||
ctx := t.Context()
|
||||
t.Run("No blocks", func(t *testing.T) {
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
|
||||
blockfeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/block"
|
||||
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/das"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db"
|
||||
@@ -479,7 +480,7 @@ func (s *Service) fetchOriginDataColumnSidecars(roBlock blocks.ROBlock, delay ti
|
||||
// Some sidecars are still missing.
|
||||
log := log.WithFields(logrus.Fields{
|
||||
"attempt": attempt,
|
||||
"missingIndices": sortedSliceFromMap(missingIndicesByRoot[root]),
|
||||
"missingIndices": helpers.SortedPrettySliceFromMap(missingIndicesByRoot[root]),
|
||||
"delay": delay,
|
||||
})
|
||||
|
||||
|
||||
@@ -4,9 +4,8 @@ import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/hex"
|
||||
"sync"
|
||||
"fmt"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/async"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/operation"
|
||||
@@ -24,70 +23,51 @@ import (
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// This defines how often a node cleans up and processes pending attestations in the queue.
var processPendingAttsPeriod = slots.DivideSlotBy(2 /* twice per slot */)
var pendingAttsLimit = 10000
var pendingAttsLimit = 32768

// This processes pending attestation queues on every processPendingAttsPeriod.
func (s *Service) runPendingAttsQueue() {
// Prevents multiple queue processing goroutines (invoked by RunEvery) from contending for data.
mutex := new(sync.Mutex)
async.RunEvery(s.ctx, processPendingAttsPeriod, func() {
mutex.Lock()
if err := s.processPendingAtts(s.ctx); err != nil {
log.WithError(err).Debug("Could not process pending attestation")
}
mutex.Unlock()
})
}

// This defines how pending attestations are processed. It contains features:
// 1. Clean up invalid pending attestations from the queue.
// 2. Check if pending attestations can be processed when the block has arrived.
// 3. Request block from a random peer if unable to proceed step 2.
func (s *Service) processPendingAtts(ctx context.Context) error {
ctx, span := trace.StartSpan(ctx, "processPendingAtts")
// This method processes pending attestations once a "known" block has arrived. After validation,
// the valid attestations are saved into the operation mem pool, and the invalid attestations are deleted
// from the sync pending pool.
func (s *Service) processPendingAttsForBlock(ctx context.Context, bRoot [32]byte) error {
ctx, span := trace.StartSpan(ctx, "processPendingAttsForBlock")
defer span.End()

// Confirm that the pending attestation's missing block arrived and the node processed the block.
if !s.cfg.beaconDB.HasBlock(ctx, bRoot) || !(s.cfg.beaconDB.HasState(ctx, bRoot) || s.cfg.beaconDB.HasStateSummary(ctx, bRoot)) || !s.cfg.chain.InForkchoice(bRoot) {
return fmt.Errorf("could not process unknown block root %#x", bRoot)
}

// Before a node processes the pending attestations queue, it verifies that
// the attestations in the queue are still valid. Attestations will
// be deleted from the queue if invalid (i.e. getting stalled from falling too many slots behind).
s.validatePendingAtts(ctx, s.cfg.clock.CurrentSlot())

s.pendingAttsLock.RLock()
|
||||
roots := make([][32]byte, 0, len(s.blkRootToPendingAtts))
|
||||
for br := range s.blkRootToPendingAtts {
|
||||
roots = append(roots, br)
|
||||
}
|
||||
attestations := s.blkRootToPendingAtts[bRoot]
|
||||
s.pendingAttsLock.RUnlock()
|
||||
|
||||
var pendingRoots [][32]byte
|
||||
if len(attestations) > 0 {
|
||||
s.processAttestations(ctx, attestations)
|
||||
log.WithFields(logrus.Fields{
|
||||
"blockRoot": hex.EncodeToString(bytesutil.Trunc(bRoot[:])),
|
||||
"pendingAttsCount": len(attestations),
|
||||
}).Debug("Verified and saved pending attestations to pool")
|
||||
}
|
||||
randGen := rand.NewGenerator()
|
||||
for _, bRoot := range roots {
|
||||
s.pendingAttsLock.RLock()
|
||||
attestations := s.blkRootToPendingAtts[bRoot]
|
||||
s.pendingAttsLock.RUnlock()
|
||||
// has the pending attestation's missing block arrived and the node processed block yet?
|
||||
if s.cfg.beaconDB.HasBlock(ctx, bRoot) && (s.cfg.beaconDB.HasState(ctx, bRoot) || s.cfg.beaconDB.HasStateSummary(ctx, bRoot)) && s.cfg.chain.InForkchoice(bRoot) {
|
||||
s.processAttestations(ctx, attestations)
|
||||
log.WithFields(logrus.Fields{
|
||||
"blockRoot": hex.EncodeToString(bytesutil.Trunc(bRoot[:])),
|
||||
"pendingAttsCount": len(attestations),
|
||||
}).Debug("Verified and saved pending attestations to pool")
|
||||
|
||||
// Delete the missing block root key from pending attestation queue so a node will not request for the block again.
|
||||
s.pendingAttsLock.Lock()
|
||||
delete(s.blkRootToPendingAtts, bRoot)
|
||||
s.pendingAttsLock.Unlock()
|
||||
} else {
|
||||
s.pendingQueueLock.RLock()
|
||||
seen := s.seenPendingBlocks[bRoot]
|
||||
s.pendingQueueLock.RUnlock()
|
||||
if !seen {
|
||||
pendingRoots = append(pendingRoots, bRoot)
|
||||
}
|
||||
// Delete the missing block root key from pending attestation queue so a node will not request for the block again.
|
||||
s.pendingAttsLock.Lock()
|
||||
delete(s.blkRootToPendingAtts, bRoot)
|
||||
pendingRoots := make([][32]byte, 0, len(s.blkRootToPendingAtts))
|
||||
s.pendingQueueLock.RLock()
|
||||
for r := range s.blkRootToPendingAtts {
|
||||
if !s.seenPendingBlocks[r] {
|
||||
pendingRoots = append(pendingRoots, r)
|
||||
}
|
||||
}
|
||||
s.pendingQueueLock.RUnlock()
|
||||
s.pendingAttsLock.Unlock()
|
||||
|
||||
// Request the blocks for the pending attestations that could not be processed.
|
||||
return s.sendBatchRootRequest(ctx, pendingRoots, randGen)
|
||||
}
|
||||
|
||||
|
||||
@@ -53,16 +53,47 @@ func TestProcessPendingAtts_NoBlockRequestBlock(t *testing.T) {
|
||||
p1.Peers().SetConnectionState(p2.PeerID(), peers.Connected)
|
||||
p1.Peers().SetChainState(p2.PeerID(), ðpb.StatusV2{})
|
||||
|
||||
chain := &mock.ChainService{Genesis: prysmTime.Now(), FinalizedCheckPoint: ðpb.Checkpoint{}}
|
||||
// Create and save block 'A' to DB
|
||||
blockA := util.NewBeaconBlock()
|
||||
util.SaveBlock(t, t.Context(), db, blockA)
|
||||
rootA, err := blockA.Block.HashTreeRoot()
|
||||
require.NoError(t, err)
|
||||
|
||||
// Save state for block 'A'
|
||||
stateA, err := util.NewBeaconState()
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, db.SaveState(t.Context(), stateA, rootA))
|
||||
|
||||
// Setup chain service with block 'A' in forkchoice
|
||||
chain := &mock.ChainService{
|
||||
Genesis: prysmTime.Now(),
|
||||
FinalizedCheckPoint: ðpb.Checkpoint{},
|
||||
// NotFinalized: false means InForkchoice returns true
|
||||
}
|
||||
|
||||
r := &Service{
|
||||
cfg: &config{p2p: p1, beaconDB: db, chain: chain, clock: startup.NewClock(chain.Genesis, chain.ValidatorsRoot)},
|
||||
blkRootToPendingAtts: make(map[[32]byte][]any),
|
||||
seenPendingBlocks: make(map[[32]byte]bool),
|
||||
chainStarted: abool.New(),
|
||||
}
|
||||
|
||||
a := ðpb.Attestation{Data: ðpb.AttestationData{Target: ðpb.Checkpoint{Root: make([]byte, 32)}}}
|
||||
r.blkRootToPendingAtts[[32]byte{'A'}] = []any{a}
|
||||
require.NoError(t, r.processPendingAtts(t.Context()))
|
||||
// Add pending attestations for OTHER block roots (not block A)
|
||||
// These are blocks we don't have yet, so they should be requested
|
||||
attB := ðpb.Attestation{Data: ðpb.AttestationData{
|
||||
BeaconBlockRoot: bytesutil.PadTo([]byte{'B'}, 32),
|
||||
Target: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
}}
|
||||
attC := ðpb.Attestation{Data: ðpb.AttestationData{
|
||||
BeaconBlockRoot: bytesutil.PadTo([]byte{'C'}, 32),
|
||||
Target: ðpb.Checkpoint{Root: make([]byte, 32)},
|
||||
}}
|
||||
r.blkRootToPendingAtts[[32]byte{'B'}] = []any{attB}
|
||||
r.blkRootToPendingAtts[[32]byte{'C'}] = []any{attC}
|
||||
|
||||
// Process block A (which exists and has no pending attestations)
|
||||
// This should skip processing attestations for A and request blocks B and C
|
||||
require.NoError(t, r.processPendingAttsForBlock(t.Context(), rootA))
|
||||
require.LogsContain(t, hook, "Requesting block by root")
|
||||
}
|
||||
|
||||
@@ -141,7 +172,7 @@ func TestProcessPendingAtts_HasBlockSaveUnaggregatedAtt(t *testing.T) {
|
||||
require.NoError(t, r.cfg.beaconDB.SaveState(t.Context(), s, root))
|
||||
|
||||
r.blkRootToPendingAtts[root] = []any{att}
|
||||
require.NoError(t, r.processPendingAtts(t.Context()))
|
||||
require.NoError(t, r.processPendingAttsForBlock(t.Context(), root))
|
||||
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
@@ -235,7 +266,7 @@ func TestProcessPendingAtts_HasBlockSaveUnaggregatedAttElectra(t *testing.T) {
|
||||
require.NoError(t, r.cfg.beaconDB.SaveState(t.Context(), s, root))
|
||||
|
||||
r.blkRootToPendingAtts[root] = []any{att}
|
||||
require.NoError(t, r.processPendingAtts(t.Context()))
|
||||
require.NoError(t, r.processPendingAttsForBlock(t.Context(), root))
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
go func() {
|
||||
@@ -360,7 +391,7 @@ func TestProcessPendingAtts_HasBlockSaveUnAggregatedAttElectra_VerifyAlreadySeen
|
||||
r.blkRootToPendingAtts[root] = []any{
|
||||
att,
|
||||
}
|
||||
require.NoError(t, r.processPendingAtts(t.Context()))
|
||||
require.NoError(t, r.processPendingAttsForBlock(t.Context(), root))
|
||||
|
||||
// Verify that the event feed receives the expected attestation.
|
||||
var wg sync.WaitGroup
|
||||
@@ -471,7 +502,7 @@ func TestProcessPendingAtts_NoBroadcastWithBadSignature(t *testing.T) {
|
||||
}
|
||||
|
||||
s.blkRootToPendingAtts[r32] = []any{ðpb.SignedAggregateAttestationAndProof{Message: a, Signature: make([]byte, fieldparams.BLSSignatureLength)}}
|
||||
require.NoError(t, s.processPendingAtts(t.Context()))
|
||||
require.NoError(t, s.processPendingAttsForBlock(t.Context(), r32))
|
||||
|
||||
assert.Equal(t, false, p2p.BroadcastCalled.Load(), "Broadcasted bad aggregate")
|
||||
|
||||
@@ -510,7 +541,7 @@ func TestProcessPendingAtts_NoBroadcastWithBadSignature(t *testing.T) {
|
||||
require.NoError(t, err)
|
||||
|
||||
s.blkRootToPendingAtts[r32] = []any{ðpb.SignedAggregateAttestationAndProof{Message: aggregateAndProof, Signature: aggreSig}}
|
||||
require.NoError(t, s.processPendingAtts(t.Context()))
|
||||
require.NoError(t, s.processPendingAttsForBlock(t.Context(), r32))
|
||||
|
||||
assert.Equal(t, true, p2p.BroadcastCalled.Load(), "The good aggregate was not broadcasted")
|
||||
|
||||
@@ -601,7 +632,7 @@ func TestProcessPendingAtts_HasBlockSaveAggregatedAtt(t *testing.T) {
|
||||
require.NoError(t, r.cfg.beaconDB.SaveState(t.Context(), s, root))
|
||||
|
||||
r.blkRootToPendingAtts[root] = []any{ðpb.SignedAggregateAttestationAndProof{Message: aggregateAndProof, Signature: aggreSig}}
|
||||
require.NoError(t, r.processPendingAtts(t.Context()))
|
||||
require.NoError(t, r.processPendingAttsForBlock(t.Context(), root))
|
||||
|
||||
assert.Equal(t, 1, len(r.cfg.attPool.AggregatedAttestations()), "Did not save aggregated att")
|
||||
assert.DeepEqual(t, att, r.cfg.attPool.AggregatedAttestations()[0], "Incorrect saved att")
|
||||
@@ -696,7 +727,7 @@ func TestProcessPendingAtts_HasBlockSaveAggregatedAttElectra(t *testing.T) {
|
||||
require.NoError(t, r.cfg.beaconDB.SaveState(t.Context(), s, root))
|
||||
|
||||
r.blkRootToPendingAtts[root] = []any{ðpb.SignedAggregateAttestationAndProofElectra{Message: aggregateAndProof, Signature: aggreSig}}
|
||||
require.NoError(t, r.processPendingAtts(t.Context()))
|
||||
require.NoError(t, r.processPendingAttsForBlock(t.Context(), root))
|
||||
|
||||
assert.Equal(t, 1, len(r.cfg.attPool.AggregatedAttestations()), "Did not save aggregated att")
|
||||
assert.DeepEqual(t, att, r.cfg.attPool.AggregatedAttestations()[0], "Incorrect saved att")
|
||||
@@ -780,8 +811,8 @@ func TestProcessPendingAtts_BlockNotInForkChoice(t *testing.T) {
|
||||
// Add pending attestation
|
||||
r.blkRootToPendingAtts[root] = []any{ðpb.SignedAggregateAttestationAndProof{Message: aggregateAndProof}}
|
||||
|
||||
// Process pending attestations - should not process because block is not in fork choice
|
||||
require.NoError(t, r.processPendingAtts(t.Context()))
|
||||
// Process pending attestations - should return error because block is not in fork choice
|
||||
require.ErrorContains(t, "could not process unknown block root", r.processPendingAttsForBlock(t.Context(), root))
|
||||
|
||||
// Verify attestations were not processed (should still be pending)
|
||||
assert.Equal(t, 1, len(r.blkRootToPendingAtts[root]), "Attestations should still be pending")
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/db/filesystem"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/execution"
|
||||
@@ -96,7 +97,7 @@ func (s *Service) requestAndSaveMissingDataColumnSidecars(blks []blocks.ROBlock)
|
||||
|
||||
custodyGroupCount, err := s.cfg.p2p.CustodyGroupCount()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "fetch custody group count from peer")
|
||||
return errors.Wrap(err, "custody group count")
|
||||
}
|
||||
|
||||
samplingSize := max(custodyGroupCount, samplesPerSlot)
|
||||
@@ -121,9 +122,9 @@ func (s *Service) requestAndSaveMissingDataColumnSidecars(blks []blocks.ROBlock)
|
||||
}
|
||||
|
||||
if len(missingIndicesByRoot) > 0 {
|
||||
prettyMissingIndicesByRoot := make(map[string][]uint64, len(missingIndicesByRoot))
|
||||
prettyMissingIndicesByRoot := make(map[string]string, len(missingIndicesByRoot))
|
||||
for root, indices := range missingIndicesByRoot {
|
||||
prettyMissingIndicesByRoot[fmt.Sprintf("%#x", root)] = sortedSliceFromMap(indices)
|
||||
prettyMissingIndicesByRoot[fmt.Sprintf("%#x", root)] = helpers.SortedPrettySliceFromMap(indices)
|
||||
}
|
||||
return errors.Errorf("some sidecars are still missing after fetch: %v", prettyMissingIndicesByRoot)
|
||||
}
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"slices"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/encoder"
|
||||
p2ptypes "github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/types"
|
||||
@@ -598,7 +599,7 @@ func isSidecarIndexRequested(request *ethpb.DataColumnSidecarsByRangeRequest) Da
|
||||
return func(sidecar blocks.RODataColumn) error {
|
||||
columnIndex := sidecar.Index
|
||||
if !requestedIndices[columnIndex] {
|
||||
requested := sortedSliceFromMap(requestedIndices)
|
||||
requested := helpers.SortedPrettySliceFromMap(requestedIndices)
|
||||
return errors.Errorf("data column sidecar index %d returned by the peer but not found in requested indices %v", columnIndex, requested)
|
||||
}
|
||||
|
||||
|
||||
@@ -274,7 +274,6 @@ func (s *Service) Start() {
|
||||
s.cfg.p2p.AddPingMethod(s.sendPingRequest)
|
||||
|
||||
s.processPendingBlocksQueue()
|
||||
s.runPendingAttsQueue()
|
||||
s.maintainPeerStatuses()
|
||||
|
||||
if params.FuluEnabled() {
|
||||
|
||||
@@ -44,6 +44,11 @@ type wrappedVal func(context.Context, peer.ID, *pubsub.Message) (pubsub.Validati
|
||||
// subHandler represents handler for a given subscription.
|
||||
type subHandler func(context.Context, proto.Message) error
|
||||
|
||||
// noopHandler is used for subscriptions that do not require anything to be done.
|
||||
var noopHandler subHandler = func(ctx context.Context, msg proto.Message) error {
|
||||
return nil
|
||||
}
|
||||
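noopHandler above lets a topic keep its gossip validator while doing nothing after validation; the light client optimistic and finality update topics below now pass it in place of their dedicated subscriber functions (that subscriber file is deleted later in this diff). A small sketch of the shape, with a simplified subscribe signature:

```go
package example

import (
	"context"

	"google.golang.org/protobuf/proto"
)

// subHandler mirrors the handler type in the diff.
type subHandler func(context.Context, proto.Message) error

// noopHandler is used for subscriptions that need validation but no further processing.
var noopHandler subHandler = func(ctx context.Context, msg proto.Message) error {
	return nil
}

// subscribe is a simplified stand-in for the service's subscribe method.
func subscribe(topic string, handle subHandler) {
	_ = topic
	_ = handle
}

func registerLightClientTopics() {
	// Both light client update topics keep their validators but no longer need a handler body.
	subscribe("light_client_optimistic_update", noopHandler)
	subscribe("light_client_finality_update", noopHandler)
}
```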
|
||||
// subscribeParameters holds the parameters that are needed to construct a set of subscriptions topics for a given
|
||||
// set of gossipsub subnets.
|
||||
type subscribeParameters struct {
|
||||
@@ -251,7 +256,7 @@ func (s *Service) registerSubscribers(epoch primitives.Epoch, digest [4]byte) {
|
||||
s.subscribe(
|
||||
p2p.LightClientOptimisticUpdateTopicFormat,
|
||||
s.validateLightClientOptimisticUpdate,
|
||||
s.lightClientOptimisticUpdateSubscriber,
|
||||
noopHandler,
|
||||
digest,
|
||||
)
|
||||
})
|
||||
@@ -259,7 +264,7 @@ func (s *Service) registerSubscribers(epoch primitives.Epoch, digest [4]byte) {
|
||||
s.subscribe(
|
||||
p2p.LightClientFinalityUpdateTopicFormat,
|
||||
s.validateLightClientFinalityUpdate,
|
||||
s.lightClientFinalityUpdateSubscriber,
|
||||
noopHandler,
|
||||
digest,
|
||||
)
|
||||
})
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/helpers"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/peerdas"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/transition/interop"
|
||||
"github.com/OffchainLabs/prysm/v6/config/features"
|
||||
@@ -67,7 +68,10 @@ func (s *Service) beaconBlockSubscriber(ctx context.Context, msg proto.Message)
|
||||
}
|
||||
return err
|
||||
}
|
||||
return err
|
||||
if err := s.processPendingAttsForBlock(ctx, root); err != nil {
|
||||
return errors.Wrap(err, "process pending atts for block")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
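With the periodic runPendingAttsQueue gone, the gossip block subscriber becomes the trigger: once a block has been processed, the attestations that were waiting on that root are drained immediately. A stripped-down sketch of that wiring (the service type and its callbacks are stand-ins):

```go
package example

import (
	"context"
	"fmt"
)

type service struct {
	processBlock               func(ctx context.Context, root [32]byte) error
	processPendingAttsForBlock func(ctx context.Context, root [32]byte) error
}

// onBlock mirrors beaconBlockSubscriber's tail: process the block, then flush
// the attestations that were queued while this block was still unknown.
func (s *service) onBlock(ctx context.Context, root [32]byte) error {
	if err := s.processBlock(ctx, root); err != nil {
		return err
	}
	if err := s.processPendingAttsForBlock(ctx, root); err != nil {
		return fmt.Errorf("process pending atts for block: %w", err)
	}
	return nil
}
```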
|
||||
// processSidecarsFromExecutionFromBlock retrieves (if available) sidecars data from the execution client,
|
||||
@@ -227,7 +231,7 @@ func (s *Service) processDataColumnSidecarsFromExecution(ctx context.Context, so
|
||||
"iteration": iteration,
|
||||
"type": source.Type(),
|
||||
"count": len(unseenIndices),
|
||||
"indices": sortedSliceFromMap(unseenIndices),
|
||||
"indices": helpers.SortedPrettySliceFromMap(unseenIndices),
|
||||
}).Debug("Constructed data column sidecars from the execution client")
|
||||
}
|
||||
|
||||
@@ -266,16 +270,9 @@ func (s *Service) broadcastAndReceiveUnseenDataColumnSidecars(
|
||||
unseenIndices[sidecar.Index] = true
|
||||
}
|
||||
|
||||
// Broadcast all the data column sidecars we reconstructed but did not see via gossip.
|
||||
for _, sidecar := range unseenSidecars {
|
||||
// Compute the subnet for this data column sidecar.
|
||||
subnet := peerdas.ComputeSubnetForDataColumnSidecar(sidecar.Index)
|
||||
|
||||
// Broadcast the data column sidecar.
|
||||
if err := s.cfg.p2p.BroadcastDataColumnSidecar(subnet, sidecar); err != nil {
|
||||
// Don't return on error on broadcast failure, just log it.
|
||||
log.WithError(err).Error("Broadcast data column")
|
||||
}
|
||||
// Broadcast all the data column sidecars we reconstructed but did not see via gossip (non blocking).
|
||||
if err := s.cfg.p2p.BroadcastDataColumnSidecars(ctx, unseenSidecars); err != nil {
|
||||
return nil, errors.Wrap(err, "broadcast data column sidecars")
|
||||
}
|
||||
|
||||
// Receive data column sidecars.
|
||||
|
||||
@@ -1,66 +0,0 @@
|
||||
package sync
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed"
|
||||
statefeed "github.com/OffchainLabs/prysm/v6/beacon-chain/core/feed/state"
|
||||
lightclientTypes "github.com/OffchainLabs/prysm/v6/consensus-types/light-client"
|
||||
"github.com/sirupsen/logrus"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
func (s *Service) lightClientOptimisticUpdateSubscriber(_ context.Context, msg proto.Message) error {
|
||||
update, err := lightclientTypes.NewWrappedOptimisticUpdate(msg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
attestedHeaderRoot, err := update.AttestedHeader().Beacon().HashTreeRoot()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"attestedSlot": fmt.Sprintf("%d", update.AttestedHeader().Beacon().Slot),
|
||||
"signatureSlot": fmt.Sprintf("%d", update.SignatureSlot()),
|
||||
"attestedHeaderRoot": fmt.Sprintf("%x", attestedHeaderRoot),
|
||||
}).Debug("Saving newly received light client optimistic update.")
|
||||
|
||||
s.lcStore.SetLastOptimisticUpdate(update, false)
|
||||
|
||||
s.cfg.stateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.LightClientOptimisticUpdate,
|
||||
Data: update,
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Service) lightClientFinalityUpdateSubscriber(_ context.Context, msg proto.Message) error {
|
||||
update, err := lightclientTypes.NewWrappedFinalityUpdate(msg)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
attestedHeaderRoot, err := update.AttestedHeader().Beacon().HashTreeRoot()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
log.WithFields(logrus.Fields{
|
||||
"attestedSlot": fmt.Sprintf("%d", update.AttestedHeader().Beacon().Slot),
|
||||
"signatureSlot": fmt.Sprintf("%d", update.SignatureSlot()),
|
||||
"attestedHeaderRoot": fmt.Sprintf("%x", attestedHeaderRoot),
|
||||
}).Debug("Saving newly received light client finality update.")
|
||||
|
||||
s.lcStore.SetLastFinalityUpdate(update, false)
|
||||
|
||||
s.cfg.stateNotifier.StateFeed().Send(&feed.Event{
|
||||
Type: statefeed.LightClientFinalityUpdate,
|
||||
Data: update,
|
||||
})
|
||||
|
||||
return nil
|
||||
}
|
||||
@@ -9,12 +9,10 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/OffchainLabs/prysm/v6/async/abool"
|
||||
"github.com/OffchainLabs/prysm/v6/async/event"
|
||||
mockChain "github.com/OffchainLabs/prysm/v6/beacon-chain/blockchain/testing"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/cache"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/core/signing"
|
||||
db "github.com/OffchainLabs/prysm/v6/beacon-chain/db/testing"
|
||||
lightClient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/operations/slashings"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p"
|
||||
"github.com/OffchainLabs/prysm/v6/beacon-chain/p2p/encoder"
|
||||
@@ -28,7 +26,6 @@ import (
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/primitives"
|
||||
"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
|
||||
pb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
|
||||
"github.com/OffchainLabs/prysm/v6/runtime/version"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/assert"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/require"
|
||||
"github.com/OffchainLabs/prysm/v6/testing/util"
|
||||
@@ -676,143 +673,3 @@ func createPeer(t *testing.T, topics ...string) *p2ptest.TestP2P {
|
||||
}
|
||||
return p
|
||||
}
|
||||
|
||||
func TestSubscribe_ReceivesLCOptimisticUpdate(t *testing.T) {
|
||||
origNC := params.BeaconConfig()
|
||||
// restore network config after test completes
|
||||
defer func() {
|
||||
params.OverrideBeaconConfig(origNC)
|
||||
}()
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
p2pService := p2ptest.NewTestP2P(t)
|
||||
ctx := t.Context()
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.AltairForkEpoch = 1
|
||||
cfg.ForkVersionSchedule[[4]byte{1, 0, 0, 0}] = 1
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
secondsPerSlot := int(params.BeaconConfig().SecondsPerSlot)
|
||||
slotIntervals := int(params.BeaconConfig().IntervalsPerSlot)
|
||||
slotsPerEpoch := int(params.BeaconConfig().SlotsPerEpoch)
|
||||
|
||||
genesisDrift := slotsPerEpoch*secondsPerSlot + 2*secondsPerSlot + secondsPerSlot/slotIntervals
|
||||
chainService := &mockChain.ChainService{
|
||||
ValidatorsRoot: [32]byte{'A'},
|
||||
Genesis: time.Unix(time.Now().Unix()-int64(genesisDrift), 0),
|
||||
}
|
||||
d := db.SetupDB(t)
|
||||
lcStore := lightClient.NewLightClientStore(&p2ptest.FakeP2P{}, new(event.Feed), d)
|
||||
|
||||
r := Service{
|
||||
ctx: ctx,
|
||||
cfg: &config{
|
||||
p2p: p2pService,
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
chain: chainService,
|
||||
beaconDB: d,
|
||||
clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot),
|
||||
stateNotifier: &mockChain.MockStateNotifier{},
|
||||
},
|
||||
chainStarted: abool.New(),
|
||||
lcStore: lcStore,
|
||||
subHandler: newSubTopicHandler(),
|
||||
}
|
||||
markInitSyncComplete(t, &r)
|
||||
topic := p2p.LightClientOptimisticUpdateTopicFormat
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
|
||||
var err error
|
||||
p2pService.Digest, err = r.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
r.subscribe(topic, r.validateLightClientOptimisticUpdate, func(ctx context.Context, msg proto.Message) error {
|
||||
require.NoError(t, r.lightClientOptimisticUpdateSubscriber(ctx, msg))
|
||||
wg.Done()
|
||||
return nil
|
||||
}, p2pService.Digest)
|
||||
|
||||
r.markForChainStart()
|
||||
|
||||
l := util.NewTestLightClient(t, version.Altair, util.WithSupermajority(0))
|
||||
update, err := lightClient.NewLightClientOptimisticUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock)
|
||||
require.NoError(t, err, "Error generating light client optimistic update")
|
||||
|
||||
p2pService.ReceivePubSub(topic, update.Proto())
|
||||
|
||||
if util.WaitTimeout(&wg, time.Second) {
|
||||
t.Fatal("Did not receive PubSub in 1 second")
|
||||
}
|
||||
u := r.lcStore.LastOptimisticUpdate()
|
||||
assert.DeepEqual(t, update.Proto(), u.Proto())
|
||||
}
|
||||
|
||||
func TestSubscribe_ReceivesLCFinalityUpdate(t *testing.T) {
|
||||
origNC := params.BeaconConfig()
|
||||
// restore network config after test completes
|
||||
defer func() {
|
||||
params.OverrideBeaconConfig(origNC)
|
||||
}()
|
||||
|
||||
params.SetupTestConfigCleanup(t)
|
||||
p2pService := p2ptest.NewTestP2P(t)
|
||||
ctx := t.Context()
|
||||
cfg := params.BeaconConfig().Copy()
|
||||
cfg.AltairForkEpoch = 1
|
||||
cfg.ForkVersionSchedule[[4]byte{1, 0, 0, 0}] = 1
|
||||
params.OverrideBeaconConfig(cfg)
|
||||
|
||||
secondsPerSlot := int(params.BeaconConfig().SecondsPerSlot)
|
||||
slotIntervals := int(params.BeaconConfig().IntervalsPerSlot)
|
||||
slotsPerEpoch := int(params.BeaconConfig().SlotsPerEpoch)
|
||||
|
||||
genesisDrift := slotsPerEpoch*secondsPerSlot + 2*secondsPerSlot + secondsPerSlot/slotIntervals
|
||||
chainService := &mockChain.ChainService{
|
||||
ValidatorsRoot: [32]byte{'A'},
|
||||
Genesis: time.Unix(time.Now().Unix()-int64(genesisDrift), 0),
|
||||
}
|
||||
d := db.SetupDB(t)
|
||||
lcStore := lightClient.NewLightClientStore(&p2ptest.FakeP2P{}, new(event.Feed), d)
|
||||
|
||||
r := Service{
|
||||
ctx: ctx,
|
||||
cfg: &config{
|
||||
p2p: p2pService,
|
||||
initialSync: &mockSync.Sync{IsSyncing: false},
|
||||
chain: chainService,
|
||||
beaconDB: d,
|
||||
clock: startup.NewClock(chainService.Genesis, chainService.ValidatorsRoot),
|
||||
stateNotifier: &mockChain.MockStateNotifier{},
|
||||
},
|
||||
chainStarted: abool.New(),
|
||||
lcStore: lcStore,
|
||||
subHandler: newSubTopicHandler(),
|
||||
}
|
||||
markInitSyncComplete(t, &r)
|
||||
topic := p2p.LightClientFinalityUpdateTopicFormat
|
||||
var wg sync.WaitGroup
|
||||
wg.Add(1)
|
||||
|
||||
var err error
|
||||
p2pService.Digest, err = r.currentForkDigest()
|
||||
require.NoError(t, err)
|
||||
r.subscribe(topic, r.validateLightClientFinalityUpdate, func(ctx context.Context, msg proto.Message) error {
|
||||
require.NoError(t, r.lightClientFinalityUpdateSubscriber(ctx, msg))
|
||||
wg.Done()
|
||||
return nil
|
||||
}, p2pService.Digest)
|
||||
|
||||
r.markForChainStart()
|
||||
|
||||
l := util.NewTestLightClient(t, version.Altair, util.WithSupermajority(0))
|
||||
update, err := lightClient.NewLightClientFinalityUpdateFromBeaconState(l.Ctx, l.State, l.Block, l.AttestedState, l.AttestedBlock, l.FinalizedBlock)
|
||||
require.NoError(t, err, "Error generating light client finality update")
|
||||
|
||||
p2pService.ReceivePubSub(topic, update.Proto())
|
||||
|
||||
if util.WaitTimeout(&wg, time.Second) {
|
||||
t.Fatal("Did not receive PubSub in 1 second")
|
||||
}
|
||||
u := r.lcStore.LastFinalityUpdate()
|
||||
assert.DeepEqual(t, update.Proto(), u.Proto())
|
||||
}
|
||||
|
||||
@@ -429,12 +429,12 @@ func TestService_ValidateBlsToExecutionChange(t *testing.T) {
|
||||
svc := NewService(ctx, append(opts, tt.svcopts...)...)
|
||||
markInitSyncComplete(t, svc)
|
||||
svc, tt.args.topic = tt.setupSvc(svc, tt.args.msg, tt.args.topic)
|
||||
go svc.Start()
|
||||
if tt.clock == nil {
|
||||
tt.clock = startup.NewClock(time.Now(), [32]byte{})
|
||||
}
|
||||
require.NoError(t, cw.SetClock(tt.clock))
|
||||
svc.verifierWaiter = verification.NewInitializerWaiter(cw, chainService.ForkChoiceStore, svc.cfg.stateGen)
|
||||
go svc.Start()
|
||||
|
||||
marshalledObj, err := tt.args.msg.MarshalSSZ()
|
||||
assert.NoError(t, err)
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
lightclient "github.com/OffchainLabs/prysm/v6/beacon-chain/light-client"
|
||||
"github.com/OffchainLabs/prysm/v6/config/params"
|
||||
"github.com/OffchainLabs/prysm/v6/consensus-types/interfaces"
|
||||
"github.com/OffchainLabs/prysm/v6/monitoring/tracing"
|
||||
@@ -14,6 +13,7 @@ import (
|
||||
pubsub "github.com/libp2p/go-libp2p-pubsub"
|
||||
"github.com/libp2p/go-libp2p/core/peer"
|
||||
"github.com/sirupsen/logrus"
|
||||
"google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
func (s *Service) validateLightClientOptimisticUpdate(ctx context.Context, pid peer.ID, msg *pubsub.Message) (pubsub.ValidationResult, error) {
@@ -31,6 +31,12 @@ func (s *Service) validateLightClientOptimisticUpdate(ctx context.Context, pid p
_, span := trace.StartSpan(ctx, "sync.validateLightClientOptimisticUpdate")
defer span.End()

currentUpdate := s.lcStore.LastOptimisticUpdate()
if currentUpdate == nil {
log.Debug("No existing optimistic update to compare against. Ignoring.")
return pubsub.ValidationIgnore, nil
}

m, err := s.decodePubsubMessage(msg)
if err != nil {
tracing.AnnotateError(span, err)
@@ -64,12 +70,12 @@ func (s *Service) validateLightClientOptimisticUpdate(ctx context.Context, pid p
return pubsub.ValidationIgnore, nil
}

if !lightclient.IsBetterOptimisticUpdate(newUpdate, s.lcStore.LastOptimisticUpdate()) {
if !proto.Equal(newUpdate.Proto(), currentUpdate.Proto()) {
log.WithFields(logrus.Fields{
"attestedSlot": fmt.Sprintf("%d", newUpdate.AttestedHeader().Beacon().Slot),
"signatureSlot": fmt.Sprintf("%d", newUpdate.SignatureSlot()),
"attestedHeaderRoot": fmt.Sprintf("%x", attestedHeaderRoot),
}).Debug("Newly received light client optimistic update ignored. current update is better.")
}).Debug("Received light client optimistic update is different from the local one. Ignoring.")
return pubsub.ValidationIgnore, nil
}
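The validator now ignores a gossiped optimistic update unless the node already holds a local update and the incoming one is proto.Equal to it; the previous IsBetterOptimisticUpdate ranking is gone (the finality-update validator below gets the same treatment). The core of that gate, with a stand-in store interface:

```go
package example

import (
	pubsub "github.com/libp2p/go-libp2p-pubsub"
	"google.golang.org/protobuf/proto"
)

type update interface {
	Proto() proto.Message
}

type store interface {
	LastOptimisticUpdate() update
}

// gate mirrors the new validation logic: no local update, or a differing update, both result in IGNORE.
func gate(s store, newUpdate update) pubsub.ValidationResult {
	current := s.LastOptimisticUpdate()
	if current == nil {
		return pubsub.ValidationIgnore
	}
	if !proto.Equal(newUpdate.Proto(), current.Proto()) {
		return pubsub.ValidationIgnore
	}
	return pubsub.ValidationAccept
}
```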
|
||||
@@ -98,6 +104,12 @@ func (s *Service) validateLightClientFinalityUpdate(ctx context.Context, pid pee
|
||||
_, span := trace.StartSpan(ctx, "sync.validateLightClientFinalityUpdate")
|
||||
defer span.End()
|
||||
|
||||
currentUpdate := s.lcStore.LastFinalityUpdate()
|
||||
if currentUpdate == nil {
|
||||
log.Debug("No existing finality update to compare against. Ignoring.")
|
||||
return pubsub.ValidationIgnore, nil
|
||||
}
|
||||
|
||||
m, err := s.decodePubsubMessage(msg)
|
||||
if err != nil {
|
||||
tracing.AnnotateError(span, err)
|
||||
@@ -131,12 +143,12 @@ func (s *Service) validateLightClientFinalityUpdate(ctx context.Context, pid pee
|
||||
return pubsub.ValidationIgnore, nil
|
||||
}
|
||||
|
||||
if !lightclient.IsFinalityUpdateValidForBroadcast(newUpdate, s.lcStore.LastFinalityUpdate()) {
|
||||
if !proto.Equal(newUpdate.Proto(), currentUpdate.Proto()) {
|
||||
log.WithFields(logrus.Fields{
|
||||
"attestedSlot": fmt.Sprintf("%d", newUpdate.AttestedHeader().Beacon().Slot),
|
||||
"signatureSlot": fmt.Sprintf("%d", newUpdate.SignatureSlot()),
|
||||
"attestedHeaderRoot": fmt.Sprintf("%x", attestedHeaderRoot),
|
||||
}).Debug("Newly received light client finality update ignored. current update is better.")
|
||||
}).Debug("Received light client finality update is different from the local one. ignoring.")
|
||||
return pubsub.ValidationIgnore, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -26,9 +26,13 @@ func TestValidateLightClientOptimisticUpdate_NilMessageOrTopic(t *testing.T) {
|
||||
params.SetupTestConfigCleanup(t)
|
||||
ctx := t.Context()
|
||||
p := p2ptest.NewTestP2P(t)
|
||||
s := &Service{cfg: &config{p2p: p, initialSync: &mockSync.Sync{}}}
|
||||
lcStore := lightClient.NewLightClientStore(&p2ptest.FakeP2P{}, new(event.Feed), testDB.SetupDB(t))
|
||||
s := &Service{cfg: &config{p2p: p, initialSync: &mockSync.Sync{}}, lcStore: lcStore}
|
||||
mockUpdate, err := util.MockOptimisticUpdate()
|
||||
require.NoError(t, err)
|
||||
s.lcStore.SetLastOptimisticUpdate(mockUpdate, false)
|
||||
|
||||
_, err := s.validateLightClientOptimisticUpdate(ctx, "", nil)
|
||||
_, err = s.validateLightClientOptimisticUpdate(ctx, "", nil)
|
||||
require.ErrorIs(t, err, errNilPubsubMessage)
|
||||
|
||||
_, err = s.validateLightClientOptimisticUpdate(ctx, "", &pubsub.Message{Message: &pb.Message{}})
|
||||
@@ -72,27 +76,27 @@ func TestValidateLightClientOptimisticUpdate(t *testing.T) {
name: "no previous update",
oldUpdateOptions: nil,
newUpdateOptions: []util.LightClientOption{},
expectedResult: pubsub.ValidationAccept,
expectedResult: pubsub.ValidationIgnore,
},
{
name: "not enough time passed",
genesisDrift: -secondsPerSlot / slotIntervals,
oldUpdateOptions: nil,
newUpdateOptions: []util.LightClientOption{},
expectedResult: pubsub.ValidationIgnore,
},
{
name: "new update has no age advantage",
oldUpdateOptions: []util.LightClientOption{},
newUpdateOptions: []util.LightClientOption{},
expectedResult: pubsub.ValidationIgnore,
},
{
name: "new update is better - younger",
name: "new update is the same",
oldUpdateOptions: []util.LightClientOption{},
newUpdateOptions: []util.LightClientOption{},
expectedResult: pubsub.ValidationAccept,
},
{
name: "new update is different",
genesisDrift: secondsPerSlot,
oldUpdateOptions: []util.LightClientOption{},
newUpdateOptions: []util.LightClientOption{util.WithIncreasedAttestedSlot(1)},
expectedResult: pubsub.ValidationAccept,
expectedResult: pubsub.ValidationIgnore,
},
}

@@ -149,9 +153,13 @@ func TestValidateLightClientFinalityUpdate_NilMessageOrTopic(t *testing.T) {
params.SetupTestConfigCleanup(t)
ctx := t.Context()
p := p2ptest.NewTestP2P(t)
s := &Service{cfg: &config{p2p: p, initialSync: &mockSync.Sync{}}}
lcStore := lightClient.NewLightClientStore(&p2ptest.FakeP2P{}, new(event.Feed), testDB.SetupDB(t))
s := &Service{cfg: &config{p2p: p, initialSync: &mockSync.Sync{}}, lcStore: lcStore}
mockUpdate, err := util.MockFinalityUpdate()
require.NoError(t, err)
s.lcStore.SetLastFinalityUpdate(mockUpdate, false)

_, err := s.validateLightClientFinalityUpdate(ctx, "", nil)
_, err = s.validateLightClientFinalityUpdate(ctx, "", nil)
require.ErrorIs(t, err, errNilPubsubMessage)

_, err = s.validateLightClientFinalityUpdate(ctx, "", &pubsub.Message{Message: &pb.Message{}})
@@ -195,44 +203,26 @@ func TestValidateLightClientFinalityUpdate(t *testing.T) {
name: "no previous update",
oldUpdateOptions: nil,
newUpdateOptions: []util.LightClientOption{},
expectedResult: pubsub.ValidationAccept,
expectedResult: pubsub.ValidationIgnore,
},
{
name: "not enough time passed",
genesisDrift: -secondsPerSlot / slotIntervals,
oldUpdateOptions: nil,
newUpdateOptions: []util.LightClientOption{},
expectedResult: pubsub.ValidationIgnore,
},
{
name: "new update has no advantage",
oldUpdateOptions: []util.LightClientOption{},
newUpdateOptions: []util.LightClientOption{},
expectedResult: pubsub.ValidationIgnore,
},
{
name: "new update is better - age",
name: "new update is the same",
oldUpdateOptions: []util.LightClientOption{},
newUpdateOptions: []util.LightClientOption{},
expectedResult: pubsub.ValidationAccept,
},
{
name: "new update is different",
genesisDrift: secondsPerSlot,
oldUpdateOptions: []util.LightClientOption{},
newUpdateOptions: []util.LightClientOption{util.WithIncreasedFinalizedSlot(1)},
expectedResult: pubsub.ValidationAccept,
},
{
name: "new update is better - supermajority",
oldUpdateOptions: []util.LightClientOption{},
newUpdateOptions: []util.LightClientOption{util.WithSupermajority(0)},
expectedResult: pubsub.ValidationAccept,
},
{
name: "old update is better - supermajority",
oldUpdateOptions: []util.LightClientOption{util.WithSupermajority(0)},
newUpdateOptions: []util.LightClientOption{},
expectedResult: pubsub.ValidationIgnore,
},
{
name: "old update is better - age",
oldUpdateOptions: []util.LightClientOption{util.WithIncreasedAttestedSlot(1)},
newUpdateOptions: []util.LightClientOption{},
expectedResult: pubsub.ValidationIgnore,
},
}

@@ -410,9 +410,9 @@ func TestService_ValidateSyncCommitteeMessage(t *testing.T) {
var clock *startup.Clock
svc, tt.args.topic, clock = tt.setupSvc(svc, tt.args.msg, tt.args.topic)
markInitSyncComplete(t, svc)
go svc.Start()
require.NoError(t, cw.SetClock(clock))
svc.verifierWaiter = verification.NewInitializerWaiter(cw, chainService.ForkChoiceStore, svc.cfg.stateGen)
go svc.Start()

marshalledObj, err := tt.args.msg.MarshalSSZ()
assert.NoError(t, err)

@@ -2,6 +2,7 @@ package verification

import (
"context"
"crypto/sha256"
"fmt"
"strings"
"time"
@@ -524,24 +525,39 @@ func columnErrBuilder(baseErr error) error {
return errors.Wrap(baseErr, errColumnsInvalid.Error())
}

func inclusionProofKey(c blocks.RODataColumn) ([160]byte, error) {
var key [160]byte
if len(c.KzgCommitmentsInclusionProof) != 4 {
// inclusionProofKey computes a unique key based on the KZG commitments,
// the KZG commitments inclusion proof, and the signed block header root.
func inclusionProofKey(c blocks.RODataColumn) ([32]byte, error) {
const (
commsIncProofLen = 4
commsIncProofByteCount = commsIncProofLen * 32
)

if len(c.KzgCommitmentsInclusionProof) != commsIncProofLen {
// This should already be enforced by ssz unmarshaling; still check so we don't panic on array bounds.
return key, columnErrBuilder(ErrSidecarInclusionProofInvalid)
return [32]byte{}, columnErrBuilder(ErrSidecarInclusionProofInvalid)
}

commsByteCount := len(c.KzgCommitments) * fieldparams.KzgCommitmentSize
unhashedKey := make([]byte, 0, commsIncProofByteCount+fieldparams.RootLength+commsByteCount)

// Include the commitments inclusion proof in the key.
for _, proof := range c.KzgCommitmentsInclusionProof {
unhashedKey = append(unhashedKey, proof...)
}

// Include the block root in the key.
root, err := c.SignedBlockHeader.HashTreeRoot()
if err != nil {
return [160]byte{}, columnErrBuilder(errors.Wrap(err, "hash tree root"))
return [32]byte{}, columnErrBuilder(errors.Wrap(err, "hash tree root"))
}

for i := range c.KzgCommitmentsInclusionProof {
if copy(key[32*i:32*i+32], c.KzgCommitmentsInclusionProof[i]) != 32 {
return key, columnErrBuilder(ErrSidecarInclusionProofInvalid)
}
unhashedKey = append(unhashedKey, root[:]...)

// Include the commitments in the key.
for _, commitment := range c.KzgCommitments {
unhashedKey = append(unhashedKey, commitment...)
}

copy(key[128:], root[:])
return key, nil
return sha256.Sum256(unhashedKey), nil
}

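As an aside on the design above: hashing the concatenated fields yields a small, fixed-size value that is directly usable as a map key, which is exactly what a seen-cache needs. The sketch below is hypothetical (plain byte slices instead of Prysm's sidecar types) and only illustrates that pattern; the function and variable names are not from the repository.

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

// cacheKey hashes the variable-length parts of a sidecar-like record into a
// fixed-size, comparable key. It mirrors the approach in the diff above, but
// with plain byte slices instead of Prysm types (hypothetical example).
func cacheKey(inclusionProof [][]byte, blockRoot [32]byte, commitments [][]byte) [32]byte {
	buf := make([]byte, 0, len(inclusionProof)*32+32+len(commitments)*48)
	for _, p := range inclusionProof {
		buf = append(buf, p...)
	}
	buf = append(buf, blockRoot[:]...)
	for _, c := range commitments {
		buf = append(buf, c...)
	}
	return sha256.Sum256(buf)
}

func main() {
	seen := map[[32]byte]bool{} // arrays are comparable, so the digest works as a map key
	root := [32]byte{1}
	k := cacheKey([][]byte{{0xaa}}, root, [][]byte{{0xbb}})
	if !seen[k] {
		seen[k] = true
		fmt.Println("first time seeing this (proof, root, commitments) tuple")
	}
}
```

Including the commitments in the digest is what the `manu-inclusion-cache-key.md` changelog entry below refers to: two columns with identical proofs and header but different commitments now map to different keys.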
@@ -0,0 +1,3 @@
### Fixed

- Replace fmt.Printf with proper test error handling in web3signer keymanager tests, using require.NoError(t, err) instead of t.Fatalf, for clearer failures and easier debugging.
changelog/bastin_lc-p2p-validation.md (new file)
@@ -0,0 +1,4 @@
### Changed

- Compare received light client messages over gossipsub with locally computed ones before forwarding. Also, no longer save updates from gossipsub; just validate and forward them.
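To make the new gossip rule concrete, here is a minimal, self-contained sketch of the comparison it describes, assuming the node already holds its locally computed update; the helper and type names are illustrative, and only the use of `proto.Equal` mirrors the diff above.

```go
package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/wrapperspb"
)

// validationResult mirrors the pubsub outcomes used above (illustrative only).
type validationResult int

const (
	validationAccept validationResult = iota
	validationIgnore
)

// validateAgainstLocal forwards a gossip update only when it is semantically
// equal to the locally computed one; nothing is saved from gossip.
func validateAgainstLocal(local, received proto.Message) validationResult {
	if local == nil {
		// No local update to compare against yet.
		return validationIgnore
	}
	if !proto.Equal(local, received) {
		return validationIgnore
	}
	return validationAccept
}

func main() {
	local := wrapperspb.String("finality-update")
	fmt.Println(validateAgainstLocal(local, wrapperspb.String("finality-update"))) // 0 (accept)
	fmt.Println(validateAgainstLocal(local, wrapperspb.String("other")))           // 1 (ignore)
}
```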
changelog/james-prysm_fix-config-parsing.md (new file)
@@ -0,0 +1,3 @@
### Fixed

- Fix "Unsupported config field kind; value forwarded verbatim" errors for fields of type string.
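The error text suggests a reflection-based config loader that switches on field kinds and warns about kinds it does not handle. The sketch below is a hypothetical reconstruction of handling `reflect.String` explicitly; it is not Prysm's actual loader, and the struct and function names are made up.

```go
package main

import (
	"fmt"
	"reflect"
	"strconv"
)

// setField assigns a textual value to a struct field based on its kind.
// Handling reflect.String explicitly avoids an "unsupported config field
// kind" warning for string-typed values (hypothetical reconstruction).
func setField(field reflect.Value, raw string) error {
	switch field.Kind() {
	case reflect.String:
		field.SetString(raw)
	case reflect.Uint64:
		v, err := strconv.ParseUint(raw, 10, 64)
		if err != nil {
			return err
		}
		field.SetUint(v)
	default:
		return fmt.Errorf("unsupported config field kind: %s", field.Kind())
	}
	return nil
}

type config struct {
	PresetBase string
	MaxBlobs   uint64
}

func main() {
	c := config{}
	v := reflect.ValueOf(&c).Elem()
	_ = setField(v.FieldByName("PresetBase"), "mainnet")
	_ = setField(v.FieldByName("MaxBlobs"), "6")
	fmt.Printf("%+v\n", c) // {PresetBase:mainnet MaxBlobs:6}
}
```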
changelog/james-prysm_fix-da-metric.md (new file)
@@ -0,0 +1,3 @@
### Fixed

- The DA metric was not being recorded correctly because the `if` statement on `err` was accidentally inverted.
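To illustrate the kind of bug described here (an inverted error check gating a metric write), a hypothetical before/after; the helper name and the observation callback are made up for the example.

```go
package main

import (
	"errors"
	"fmt"
	"time"
)

// recordDAWaitTime observes how long data availability took, but only for
// successful waits. The buggy version had the condition inverted, so the
// metric was recorded only on failures (hypothetical reconstruction).
func recordDAWaitTime(elapsed time.Duration, err error, observe func(float64)) {
	// Buggy: if err == nil { return }  -- skipped the metric on success.
	if err != nil {
		return
	}
	observe(elapsed.Seconds())
}

func main() {
	recordDAWaitTime(120*time.Millisecond, nil, func(s float64) {
		fmt.Printf("observed %.3fs\n", s) // runs: the success path records the metric
	})
	recordDAWaitTime(0, errors.New("timeout"), func(float64) {
		fmt.Println("never reached")
	})
}
```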
changelog/james-prysm_improve-duties-v2.md (new file)
@@ -0,0 +1,3 @@
### Fixed

- Improve GetDuties v2 by replacing the expensive helpers.PrecomputeCommittees() with CommitteeAssignments.
changelog/james-prysm_skip-omit-config-values.md (new file)
@@ -0,0 +1,3 @@
### Fixed

- Fix the /eth/v1/config/spec endpoint to properly skip omitted values.
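A minimal sketch of what "skipping omitted values" can look like when building a spec-style response, assuming the values have already been flattened into strings; the function name and payload shape are hypothetical, not the endpoint's actual implementation.

```go
package main

import (
	"encoding/json"
	"fmt"
)

// specResponse drops entries whose value is empty so that omitted config
// fields never show up in the response payload (hypothetical sketch).
func specResponse(values map[string]string) ([]byte, error) {
	filtered := make(map[string]string, len(values))
	for k, v := range values {
		if v == "" {
			continue // omitted value: skip instead of emitting an empty string
		}
		filtered[k] = v
	}
	return json.Marshal(map[string]any{"data": filtered})
}

func main() {
	out, _ := specResponse(map[string]string{
		"SECONDS_PER_SLOT": "12",
		"DEPOSIT_CONTRACT": "", // omitted upstream
	})
	fmt.Println(string(out)) // {"data":{"SECONDS_PER_SLOT":"12"}}
}
```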
changelog/james_prysm-fix-blocker-notfound.md (new file)
@@ -0,0 +1,3 @@
### Fixed

- Fix a regression introduced in PR #15715: the blocker now returns an error for not-found lookups, and the error handling translates it into a 404 instead of a 500.
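The usual Go pattern behind this kind of fix is to return a sentinel not-found error from the lookup layer and translate it at the HTTP handler. The sketch below illustrates that pattern only; the names and routes are hypothetical, not Prysm's actual blocker API.

```go
package main

import (
	"errors"
	"fmt"
	"net/http"
	"net/http/httptest"
)

// errBlockNotFound is a sentinel the lookup layer returns instead of a generic error.
var errBlockNotFound = errors.New("block not found")

func getBlock(id string) ([]byte, error) {
	if id == "missing" {
		return nil, errBlockNotFound
	}
	return []byte("block-ssz-bytes"), nil
}

func handler(w http.ResponseWriter, r *http.Request) {
	blk, err := getBlock(r.URL.Query().Get("id"))
	switch {
	case errors.Is(err, errBlockNotFound):
		http.Error(w, "block not found", http.StatusNotFound) // 404, not 500
	case err != nil:
		http.Error(w, "internal error", http.StatusInternalServerError)
	default:
		_, _ = w.Write(blk)
	}
}

func main() {
	rec := httptest.NewRecorder()
	handler(rec, httptest.NewRequest(http.MethodGet, "/block?id=missing", nil))
	fmt.Println(rec.Code) // 404
}
```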
changelog/jihoonsong_support-fulu-genesis.md (new file)
@@ -0,0 +1,3 @@
### Added

- Support Fulu genesis block.
changelog/jtraglia_specrefs_compute_fork_digest.md (new file)
@@ -0,0 +1,3 @@
### Changed

- Add sources for compute_fork_digest to specrefs
changelog/kasey_log-invalid-ee-root.md (new file)
@@ -0,0 +1,2 @@
### Ignored
- Adding context to logs around execution engine notification failures.
changelog/manu-fix-cgc-not-initialized.md (new file)
@@ -0,0 +1,2 @@
### Fixed
- In P2P service start, wait for the custody info to be correctly initialized.
changelog/manu-fix-log.md (new file)
@@ -0,0 +1,3 @@
### Ignored
- `requestAndSaveMissingDataColumnSidecars`: Fix log
changelog/manu-go-1.25.1.md (new file)
@@ -0,0 +1,3 @@
### Changed

- Updated the Go version in go.mod to 1.25.1.
changelog/manu-inclusion-cache-key.md (new file)
@@ -0,0 +1,3 @@
### Fixed
- `inclusionProofKey`: Include the commitments in the key.
changelog/manu-logs-datacolumns.md (new file)
@@ -0,0 +1,2 @@
### Changed
- Improve logging of data column sidecars
changelog/manu-peerdas-c-kzg-update.md (new file)
@@ -0,0 +1,2 @@
### Changed
- `c-kzg-4844`: Update from `v2.1.1` to `v2.1.5`
changelog/manu-peerdas-columns-logs.md (new file)
@@ -0,0 +1,2 @@
### Changed
- Aggregate logs when broadcasting data column sidecars (one per root instead of one per sidecar)
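To illustrate the aggregation this entry describes, a hypothetical sketch that groups sidecar indices by block root and emits one log line per root instead of one per sidecar; the types and log format are made up for the example.

```go
package main

import (
	"fmt"
	"sort"
)

type sidecar struct {
	blockRoot [32]byte
	index     uint64
}

// logBroadcasts prints one line per block root, listing all column indices
// broadcast for that root (hypothetical example).
func logBroadcasts(sidecars []sidecar) {
	byRoot := make(map[[32]byte][]uint64)
	for _, sc := range sidecars {
		byRoot[sc.blockRoot] = append(byRoot[sc.blockRoot], sc.index)
	}
	for root, indices := range byRoot {
		sort.Slice(indices, func(i, j int) bool { return indices[i] < indices[j] })
		fmt.Printf("broadcast data column sidecars root=%x columns=%v\n", root[:4], indices)
	}
}

func main() {
	root := [32]byte{0xde, 0xad, 0xbe, 0xef}
	logBroadcasts([]sidecar{{root, 3}, {root, 1}, {root, 2}})
	// Output: broadcast data column sidecars root=deadbeef columns=[1 2 3]
}
```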
Some files were not shown because too many files have changed in this diff.