Compare commits

...

53 Commits

Author SHA1 Message Date
prylabs-bulldozer[bot]
40a3391229 Merge refs/heads/develop into revert-12739-shiftToDebug 2023-08-16 00:48:00 +00:00
Potuz
418959565f set optimistic status in head at init sync (#12748) 2023-08-16 00:47:56 +00:00
Nishant Das
6622882533 Revert "Shift Error Logs To Debug (#12739)"
This reverts commit 4098d098aa.
2023-08-16 08:09:23 +08:00
Raul Jordan
8229f3eb84 Prysm Web UI Release v2.0.4 (#12746)
Co-authored-by: james-prysm <james-prysm@users.noreply.github.com>
2023-08-15 21:11:44 +00:00
Nishant Das
4098d098aa Shift Error Logs To Debug (#12739) 2023-08-15 14:44:54 +00:00
anukul
46c72798c7 HTTP Beacon API: /eth/v1/validator/attestation_data (#12634)
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2023-08-14 17:56:36 +03:00
Preston Van Loon
a5474200de Update geth to 1.12.2 (#12731) 2023-08-14 17:48:11 +08:00
Preston Van Loon
a85b4445fc Update bazel to 6.3.2 (#12725)
* Update bazel version to v6.3.2

* Disable legacy external runfile generation for CI

* Revert "Disable legacy external runfile generation for CI"

This reverts commit 1911f293d6.

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-08-11 19:08:33 +00:00
Preston Van Loon
751dd847b8 Update rules go & gazelle (#12721)
* Update rules_go to v0.40.0

* Update gazelle to v0.26.0

* Update gazelle to v0.27.0

* Update gazelle to v0.28.0

* Update gazelle to v0.29.0

* Update gazelle to v0.30.0

* Update gazelle to v0.31.0
2023-08-11 16:39:35 +00:00
Preston Van Loon
aeb7a45864 Update geth to 1.12.1 (#12718)
* Update geth to 1.12.1

* Fix //cmd/validator/flags:go_default_test

* fix //beacon-chain/node:go_default_test

---------

Co-authored-by: Nishant Das <nishdas93@gmail.com>
2023-08-11 10:45:42 +00:00
Radosław Kapka
e952fd802b HTTP Beacon API: /eth/v1/validator/beacon_committee_subscriptions (#12700)
* HTTP Beacon API: `/eth/v1/validator/contribution_and_proofs`

* add comment to invalid test case

* fix validation and test

* review

* in progress

* implementation

* remove test file

* remove duplicate

* tests

* HTTP Beacon API: `/eth/v1/validator/sync_committee_subscriptions`

* pointers, pointers everywhere

* fix

* fix after merge

* implementation

* tests

* test fixes

* review

* clear cache in validator tests

* add missing stuff

* compilation fix

* remove `required` from bool

* remove time fetcher from tests

* no need to convert

* add conversion

---------

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2023-08-10 18:00:19 +00:00
Nishant Das
b511eef848 Update BLST to v0.3.11 (#12717)
* add changes

* Remove server.c exclusion in headers

---------

Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2023-08-10 22:51:19 +08:00
dependabot[bot]
7aa043892b Bump github.com/libp2p/go-libp2p from 0.27.5 to 0.27.8 (#12709)
* Bump github.com/libp2p/go-libp2p from 0.27.5 to 0.27.8

Bumps [github.com/libp2p/go-libp2p](https://github.com/libp2p/go-libp2p) from 0.27.5 to 0.27.8.
- [Release notes](https://github.com/libp2p/go-libp2p/releases)
- [Changelog](https://github.com/libp2p/go-libp2p/blob/master/CHANGELOG.md)
- [Commits](https://github.com/libp2p/go-libp2p/compare/v0.27.5...v0.27.8)

---
updated-dependencies:
- dependency-name: github.com/libp2p/go-libp2p
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>

* gazelle and remove libp2p patch. The patch is merged upstream as of this version

---------

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Preston Van Loon <pvanloon@offchainlabs.com>
2023-08-09 17:24:07 +00:00
Radosław Kapka
36be057a11 HTTP Beacon API: /eth/v1/node/syncing (#12706)
Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2023-08-09 17:23:59 +02:00
Radosław Kapka
049e608c75 HTTP Beacon API: /eth/v1/validator/sync_committee_subscriptions (#12689)
* HTTP Beacon API: `/eth/v1/validator/contribution_and_proofs`

* add comment to invalid test case

* fix validation and test

* review

* in progress

* implementation

* remove test file

* remove duplicate

* tests

* HTTP Beacon API: `/eth/v1/validator/sync_committee_subscriptions`

* pointers, pointers everywhere

* fix

* fix after merge

* review

* James' review

---------

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2023-08-08 22:54:04 +00:00
Preston Van Loon
4541598850 Update go to 1.20.7 (#12707) 2023-08-08 21:39:22 +00:00
Sammy Rosso
8c08854dd0 Fix prysmctl writing empty JSON/YAML files (#12599)
* Setup

* add in marshalable

* Cleanup

* Raul' review

* Remove yaml Marshaler

* minimal fixes

* same imports

---------

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2023-08-08 15:20:41 +00:00
m3diumrare
d2ff995eb2 Fix reported effective balance for unknown/pending validators (#12693)
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2023-08-07 09:19:02 -05:00
anukul
56a0315dde fix: set CoreService in beaconv1alpha1.Server (#12702) 2023-08-06 18:16:54 -07:00
anukul
634133fedc use struct in beacon-chain/rpc/core to store dependencies (#12701) 2023-08-05 22:54:12 +02:00
terencechain
c1c1b7ecfa Feat: aggregate parallel default (#12699) 2023-08-04 16:03:10 +00:00
Nishant Das
9a4670ec64 add changes (#12697)
Co-authored-by: terencechain <terence@prysmaticlabs.com>
2023-08-04 22:05:47 +08:00
Radosław Kapka
a664a07303 Multi Value Slice (#12616)
* multi value slice

* extract helper function

* comments

* setup godoc fix

* value benchmarks

* use guid

* fix bug when deleting items

* remove callback and rename MultiValue

* godoc

* tiny change

* Nishant's review

* typos

---------

Co-authored-by: Nishant Das <nishdas93@gmail.com>
2023-08-04 12:42:54 +00:00
Potuz
dd14d5cef0 refactor slot tickers with intervals (#12440)
* refactor slot tickers with intervals

* GoDoc

---------

Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2023-08-03 20:55:16 -03:00
Radosław Kapka
3a09405bb7 HTTP Beacon API: /eth/v1/validator/aggregate_and_proofs (#12686)
* HTTP Beacon API: `/eth/v1/validator/contribution_and_proofs`

* add comment to invalid test case

* fix validation and test

* review

* in progress

* implementation

* remove test file

* remove duplicate

* tests

* review

* test fixes

---------

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-08-03 22:24:23 +00:00
terencechain
cb59081887 Remove: span for convert to indexed attestation (#12687)
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-08-03 16:17:46 +00:00
Nishant Das
a820d4dcc8 Minor Optimization on InnerShuffleList (#12690)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-08-03 06:56:07 +00:00
terencechain
dbc17cf2ca Fix: use correct context for UpdateCommitteeCache (#12691) 2023-08-03 06:00:58 +00:00
terencechain
d38762772a test: add execution payload operation tests (#12685)
* test: add execution payload operation tests

* thanks!

Co-authored-by: Nishant Das <nishdas93@gmail.com>

* Update testing/spectest/shared/bellatrix/operations/execution_payload.go

---------

Co-authored-by: Nishant Das <nishdas93@gmail.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-08-02 08:16:46 +00:00
Potuz
e5d1eb885d Update caches blocking (#12679)
* update NSC together with epoch boundary caches

* block when updating caches

* reviews

* removal of very useful helper because the reviewers requested it :)

* use IsEpochEnd
2023-08-02 05:59:09 +00:00
terencechain
a9d7701081 test: add missing random and fork transition spec tests (#12681)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-08-01 14:08:40 +00:00
Bharath Vedartham
33abe6eb90 add metric to check if validator is in next sync commitee (#12650)
* add metric to check if validator is in next sync commitee

* Update validator/client/validator.go

* Update validator/client/metrics.go

Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>

---------

Co-authored-by: Nishant Das <nishdas93@gmail.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
Co-authored-by: Sammy Rosso <15244892+saolyn@users.noreply.github.com>
2023-08-01 08:25:10 +00:00
terencechain
c342c9a14e fix: update nil check for new validator (#12677) 2023-08-01 04:13:57 +00:00
Radosław Kapka
a9b003e1fe HTTP Beacon API: /eth/v1/validator/contribution_and_proofs (#12660)
* HTTP Beacon API: `/eth/v1/validator/contribution_and_proofs`

* add comment to invalid test case

* fix validation and test

* review

---------

Co-authored-by: james-prysm <90280386+james-prysm@users.noreply.github.com>
2023-07-31 17:32:39 +00:00
Radosław Kapka
955175b7eb Update server-side events dependency (#12676)
* Update server-side events dependency

* go sum

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-07-31 15:51:19 +00:00
Potuz
c17682940e only handle epoch boundary on canonical blocks (#12666)
* only handle epoch boundary on canonical blocks

* do not fetch headstate
2023-07-31 12:07:09 -03:00
Nishant Das
db450f53a4 Fix Update Of Committee Cache (#12668)
* fix it

* sammy's comment

* fix tests

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-07-28 13:34:03 +00:00
terencechain
493905ee9e feat: add state regren duration metric (#12672) 2023-07-27 19:33:25 +00:00
james-prysm
e449724034 Bugfix: proposer-settings edge case for activating validators (#12671) 2023-07-27 16:45:16 +00:00
terencechain
a44c209be0 feat: add metric for block gossip time (#12670) 2023-07-27 15:17:24 +00:00
terencechain
183e72b194 fix: update epoch + 1 (#12667) 2023-07-27 18:53:36 +08:00
Potuz
337c254161 update shuffling caches at epoch boundary (#12661)
* update shuffling caches at epoch boundary

* move span

* do not advance to a past slot
2023-07-26 18:46:18 +00:00
james-prysm
ec60cab2bf Bugfix: Metrics adding fix pending validators balance (#12665) 2023-07-26 16:09:24 +00:00
Potuz
ded00495e7 Parallelize cl el validation (#12590)
* Parallelize EL and CL block validation

* fix by Nishant
2023-07-26 12:23:51 -03:00
Nishant Das
113172d8aa Exit Initial Sync Early (#12659)
* add check

* fix test
2023-07-26 00:38:25 +00:00
Radosław Kapka
2b40c44879 Use the correct root in consensus validation (#12657)
Co-authored-by: Nishant Das <nishdas93@gmail.com>
2023-07-25 12:04:08 +02:00
Radosław Kapka
fc193b09bf HTTP Beacon API: /eth/v1/validator/aggregate_attestation (#12643)
* initial implementation

* it works

* tests

* extracted helper functions

* fix code

---------

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-07-24 22:16:45 +00:00
Radosław Kapka
a0d53f5155 Register GetValidatorPerformance as POST (#12658) 2023-07-24 21:17:08 +00:00
terencechain
ff3d2bc69f fix: add mainnet withdrawals and bls spec tests (#12655) 2023-07-24 14:13:02 +00:00
Nishant Das
dd403f830c clean up logging (#12653) 2023-07-24 20:29:50 +08:00
Raul Jordan
e9c8e84618 Integrate Read-Only-Lock-on-Get LRU Cache for Public Keys (#12646)
* use new lru cache

* update build

---------

Co-authored-by: Nishant Das <nishdas93@gmail.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2023-07-22 04:37:53 +00:00
james-prysm
9c250dd4c2 Builder gas limit fix default to 0 in some cases (#12647)
* adding fix for nethermind's findings on gaslimit =0 on some default setups

* adding in default gaslimit check

* fixing linting on complexity

* fixing cognitive complexity linting marker

* fixing unit test and bug with referencing

---------

Co-authored-by: Nishant Das <nishdas93@gmail.com>
2023-07-22 03:44:50 +00:00
Potuz
f97db3b738 Different parallel hashing (#12639)
* Paralellize hashing of large lists

* add unit test

* add file

* do not parallelize on low processor count

* revert minimal proc count

---------

Co-authored-by: Nishant Das <nishdas93@gmail.com>
2023-07-21 20:36:20 -04:00
197 changed files with 7158 additions and 5417 deletions

View File

@@ -1 +1 @@
6.2.1
6.3.2

View File

@@ -133,8 +133,8 @@ nogo(
# nogo checks that fail with coverage enabled.
":coverage_enabled": [],
"//conditions:default": [
"@org_golang_x_tools//go/analysis/passes/lostcancel:go_default_library",
"@org_golang_x_tools//go/analysis/passes/composite:go_default_library",
"@org_golang_x_tools//go/analysis/passes/lostcancel:go_default_library",
],
}),
)

View File

@@ -67,10 +67,10 @@ bazel_skylib_workspace()
http_archive(
name = "bazel_gazelle",
sha256 = "5982e5463f171da99e3bdaeff8c0f48283a7a5f396ec5282910b9e8a49c0dd7e",
sha256 = "29d5dafc2a5582995488c6735115d1d366fcd6a0fc2e2a153f02988706349825",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.25.0/bazel-gazelle-v0.25.0.tar.gz",
"https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.25.0/bazel-gazelle-v0.25.0.tar.gz",
"https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.31.0/bazel-gazelle-v0.31.0.tar.gz",
"https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.31.0/bazel-gazelle-v0.31.0.tar.gz",
],
)
@@ -94,10 +94,10 @@ http_archive(
# Expose internals of go_test for custom build transitions.
"//third_party:io_bazel_rules_go_test.patch",
],
sha256 = "6b65cb7917b4d1709f9410ffe00ecf3e160edf674b78c54a894471320862184f",
sha256 = "bfc5ce70b9d1634ae54f4e7b495657a18a04e0d596785f672d35d5f505ab491a",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.39.0/rules_go-v0.39.0.zip",
"https://github.com/bazelbuild/rules_go/releases/download/v0.39.0/rules_go-v0.39.0.zip",
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.40.0/rules_go-v0.40.0.zip",
"https://github.com/bazelbuild/rules_go/releases/download/v0.40.0/rules_go-v0.40.0.zip",
],
)
@@ -172,7 +172,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
go_rules_dependencies()
go_register_toolchains(
go_version = "1.20.6",
go_version = "1.20.7",
nogo = "@//:nogo",
)
@@ -312,13 +312,6 @@ filegroup(
url = "https://github.com/eth-clients/eth2-networks/archive/7b4897888cebef23801540236f73123e21774954.tar.gz",
)
http_archive(
name = "com_github_bazelbuild_buildtools",
sha256 = "7a182df18df1debabd9e36ae07c8edfa1378b8424a04561b674d933b965372b3",
strip_prefix = "buildtools-f2aed9ee205d62d45c55cfabbfd26342f8526862",
url = "https://github.com/bazelbuild/buildtools/archive/f2aed9ee205d62d45c55cfabbfd26342f8526862.zip",
)
http_archive(
name = "com_google_protobuf",
sha256 = "4e176116949be52b0408dfd24f8925d1eb674a781ae242a75296b17a1c721395",
@@ -342,9 +335,9 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "5006614c33e358699b4e072c649cd4c3866f7d41a691449d5156f6c6e07a4c60",
sha256 = "cc5b2dc9c4ea27b617ab3f31d068134f0b5c4fd173919b32b00613b0016b029a",
urls = [
"https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v2.0.3/prysm-web-ui.tar.gz",
"https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v2.0.4/prysm-web-ui.tar.gz",
],
)
@@ -391,10 +384,6 @@ load("@bazel_gazelle//:deps.bzl", "gazelle_dependencies")
gazelle_dependencies()
load("@com_github_bazelbuild_buildtools//buildifier:deps.bzl", "buildifier_dependencies")
buildifier_dependencies()
load("@com_google_protobuf//:protobuf_deps.bzl", "protobuf_deps")
protobuf_deps()

View File

@@ -88,6 +88,7 @@ go_library(
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_opencensus_go//trace:go_default_library",
"@org_golang_x_sync//errgroup:go_default_library",
],
)

View File

@@ -182,7 +182,7 @@ func (s *Service) saveHead(ctx context.Context, newHeadRoot [32]byte, headBlock
// This gets called to update canonical root mapping. It does not save head block
// root in DB. With the inception of initial-sync-cache-state flag, it uses finalized
// check point as anchors to resume sync therefore head is no longer needed to be saved on per slot basis.
func (s *Service) saveHeadNoDB(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock, r [32]byte, hs state.BeaconState) error {
func (s *Service) saveHeadNoDB(ctx context.Context, b interfaces.ReadOnlySignedBeaconBlock, r [32]byte, hs state.BeaconState, optimistic bool) error {
if err := blocks.BeaconBlockIsNil(b); err != nil {
return err
}
@@ -198,7 +198,7 @@ func (s *Service) saveHeadNoDB(ctx context.Context, b interfaces.ReadOnlySignedB
if err != nil {
return err
}
if err := s.setHeadInitialSync(r, bCp, hs); err != nil {
if err := s.setHeadInitialSync(r, bCp, hs, optimistic); err != nil {
return errors.Wrap(err, "could not set head")
}
return nil
@@ -227,7 +227,7 @@ func (s *Service) setHead(newHead *head) error {
// This sets head view object which is used to track the head slot, root, block and state. The method
// assumes that state being passed into the method will not be modified by any other alternate
// caller which holds the state's reference.
func (s *Service) setHeadInitialSync(root [32]byte, block interfaces.ReadOnlySignedBeaconBlock, state state.BeaconState) error {
func (s *Service) setHeadInitialSync(root [32]byte, block interfaces.ReadOnlySignedBeaconBlock, state state.BeaconState, optimistic bool) error {
s.headLock.Lock()
defer s.headLock.Unlock()
@@ -237,9 +237,10 @@ func (s *Service) setHeadInitialSync(root [32]byte, block interfaces.ReadOnlySig
return err
}
s.head = &head{
root: root,
block: bCp,
state: state,
root: root,
block: bCp,
state: state,
optimistic: optimistic,
}
return nil
}
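
The hunk above threads an optimistic flag through saveHeadNoDB and setHeadInitialSync so the cached head records whether it was imported optimistically during initial sync. Below is a minimal, self-contained sketch of that idea; the service and head types are simplified, hypothetical stand-ins, and only the optimistic field and the locking pattern mirror the diff.

package main

import (
	"fmt"
	"sync"
)

// head is a simplified stand-in for the service's cached head view.
// The optimistic field mirrors the one added in the diff above.
type head struct {
	root       [32]byte
	slot       uint64
	optimistic bool
}

type service struct {
	headLock sync.RWMutex
	head     *head
}

// setHeadInitialSync caches the head together with its optimistic status,
// analogous to the signature change in the diff.
func (s *service) setHeadInitialSync(root [32]byte, slot uint64, optimistic bool) {
	s.headLock.Lock()
	defer s.headLock.Unlock()
	s.head = &head{root: root, slot: slot, optimistic: optimistic}
}

// isOptimistic reads the cached flag instead of recomputing it.
func (s *service) isOptimistic() bool {
	s.headLock.RLock()
	defer s.headLock.RUnlock()
	return s.head != nil && s.head.optimistic
}

func main() {
	s := &service{}
	s.setHeadInitialSync([32]byte{0x01}, 1024, true)
	fmt.Println("head is optimistic:", s.isOptimistic())
}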

View File

@@ -86,18 +86,6 @@ func (s *Service) postBlockProcess(ctx context.Context, signed interfaces.ReadOn
"headRoot": fmt.Sprintf("%#x", headRoot),
"headWeight": headWeight,
}).Debug("Head block is not the received block")
} else {
// Updating next slot state cache can happen in the background. It shouldn't block rest of the process.
go func() {
// Use a custom deadline here, since this method runs asynchronously.
// We ignore the parent method's context and instead create a new one
// with a custom deadline, therefore using the background context instead.
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
defer cancel()
if err := transition.UpdateNextSlotCache(slotCtx, blockRoot[:], postState); err != nil {
log.WithError(err).Debug("could not update next slot state cache")
}
}()
}
newBlockHeadElapsedTime.Observe(float64(time.Since(start).Milliseconds()))
@@ -126,8 +114,29 @@ func (s *Service) postBlockProcess(ctx context.Context, signed interfaces.ReadOn
})
defer reportAttestationInclusion(b)
if err := s.handleEpochBoundary(ctx, postState, blockRoot[:]); err != nil {
return err
if headRoot == blockRoot {
// Updating next slot state cache can happen in the background
// except in the epoch boundary in which case we lock to handle
// the shuffling and proposer caches updates.
// We handle these caches only on canonical
// blocks, otherwise this will be handled by lateBlockTasks
slot := postState.Slot()
if slots.IsEpochEnd(slot) {
if err := transition.UpdateNextSlotCache(ctx, blockRoot[:], postState); err != nil {
return errors.Wrap(err, "could not update next slot state cache")
}
if err := s.handleEpochBoundary(ctx, slot, postState, blockRoot[:]); err != nil {
return errors.Wrap(err, "could not handle epoch boundary")
}
} else {
go func() {
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
defer cancel()
if err := transition.UpdateNextSlotCache(slotCtx, blockRoot[:], postState); err != nil {
log.WithError(err).Error("could not update next slot state cache")
}
}()
}
}
onBlockProcessingTime.Observe(float64(time.Since(startTime).Milliseconds()))
return nil
@@ -320,63 +329,51 @@ func (s *Service) onBlockBatch(ctx context.Context, blks []interfaces.ReadOnlySi
if _, err := s.notifyForkchoiceUpdate(ctx, arg); err != nil {
return err
}
return s.saveHeadNoDB(ctx, lastB, lastBR, preState)
return s.saveHeadNoDB(ctx, lastB, lastBR, preState, !isValidPayload)
}
// Epoch boundary bookkeeping such as logging epoch summaries.
func (s *Service) handleEpochBoundary(ctx context.Context, postState state.BeaconState, blockRoot []byte) error {
func (s *Service) updateEpochBoundaryCaches(ctx context.Context, st state.BeaconState) error {
e := coreTime.CurrentEpoch(st)
if err := helpers.UpdateCommitteeCache(ctx, st, e); err != nil {
return errors.Wrap(err, "could not update committee cache")
}
if err := helpers.UpdateProposerIndicesInCache(ctx, st, e); err != nil {
return errors.Wrap(err, "could not update proposer index cache")
}
go func() {
// Use a custom deadline here, since this method runs asynchronously.
// We ignore the parent method's context and instead create a new one
// with a custom deadline, therefore using the background context instead.
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
defer cancel()
if err := helpers.UpdateCommitteeCache(slotCtx, st, e+1); err != nil {
log.WithError(err).Warn("Could not update committee cache")
}
if err := helpers.UpdateProposerIndicesInCache(slotCtx, st, e+1); err != nil {
log.WithError(err).Warn("Failed to cache next epoch proposers")
}
}()
return nil
}
// Epoch boundary tasks: it copies the headState and updates the epoch boundary
// caches.
func (s *Service) handleEpochBoundary(ctx context.Context, slot primitives.Slot, headState state.BeaconState, blockRoot []byte) error {
ctx, span := trace.StartSpan(ctx, "blockChain.handleEpochBoundary")
defer span.End()
var err error
if postState.Slot()+1 == s.nextEpochBoundarySlot {
copied := postState.Copy()
copied, err := transition.ProcessSlotsUsingNextSlotCache(ctx, copied, blockRoot, copied.Slot()+1)
if err != nil {
return err
}
// Update caches for the next epoch at epoch boundary slot - 1.
if err := helpers.UpdateCommitteeCache(ctx, copied, coreTime.CurrentEpoch(copied)); err != nil {
return err
}
e := coreTime.CurrentEpoch(copied)
if err := helpers.UpdateProposerIndicesInCache(ctx, copied, e); err != nil {
return err
}
go func() {
// Use a custom deadline here, since this method runs asynchronously.
// We ignore the parent method's context and instead create a new one
// with a custom deadline, therefore using the background context instead.
slotCtx, cancel := context.WithTimeout(context.Background(), slotDeadline)
defer cancel()
if err := helpers.UpdateProposerIndicesInCache(slotCtx, copied, e+1); err != nil {
log.WithError(err).Warn("Failed to cache next epoch proposers")
}
}()
} else if postState.Slot() >= s.nextEpochBoundarySlot {
s.nextEpochBoundarySlot, err = slots.EpochStart(coreTime.NextEpoch(postState))
if err != nil {
return err
}
// Update caches at epoch boundary slot.
// The following updates have shortcut to return nil cheaply if fulfilled during boundary slot - 1.
if err := helpers.UpdateCommitteeCache(ctx, postState, coreTime.CurrentEpoch(postState)); err != nil {
return err
}
if err := helpers.UpdateProposerIndicesInCache(ctx, postState, coreTime.CurrentEpoch(postState)); err != nil {
return err
}
headSt, err := s.HeadState(ctx)
if err != nil {
return err
}
if err := reportEpochMetrics(ctx, postState, headSt); err != nil {
return err
}
// return early if we are advancing to a past epoch
if slot < headState.Slot() {
return nil
}
return nil
if !slots.IsEpochEnd(slot) {
return nil
}
copied := headState.Copy()
copied, err := transition.ProcessSlotsUsingNextSlotCache(ctx, copied, blockRoot, slot+1)
if err != nil {
return err
}
return s.updateEpochBoundaryCaches(ctx, copied)
}
// This feeds in the attestations included in the block to fork choice store. It's allows fork choice store
@@ -507,8 +504,9 @@ func (s *Service) runLateBlockTasks() {
// lateBlockTasks is called 4 seconds into the slot and performs tasks
// related to late blocks. It emits a MissedSlot state feed event.
// It calls FCU and sets the right attributes if we are proposing next slot
// it also updates the next slot cache to deal with skipped slots.
// it also updates the next slot cache and the proposer index cache to deal with skipped slots.
func (s *Service) lateBlockTasks(ctx context.Context) {
currentSlot := s.CurrentSlot()
if s.CurrentSlot() == s.HeadSlot() {
return
}
@@ -516,8 +514,10 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
Type: statefeed.MissedSlot,
})
s.headLock.RLock()
headRoot := s.headRoot()
headState := s.headState(ctx)
s.headLock.RUnlock()
lastRoot, lastState := transition.LastCachedState()
if lastState == nil {
lastRoot, lastState = headRoot[:], headState
@@ -528,7 +528,9 @@ func (s *Service) lateBlockTasks(ctx context.Context) {
if err := transition.UpdateNextSlotCache(ctx, lastRoot, lastState); err != nil {
log.WithError(err).Debug("could not update next slot state cache")
}
if err := s.handleEpochBoundary(ctx, currentSlot, headState, headRoot[:]); err != nil {
log.WithError(err).Error("lateBlockTasks: could not update epoch boundary caches")
}
// Head root should be empty when retrieving proposer index for the next slot.
_, id, has := s.cfg.ProposerSlotIndexCache.GetProposerPayloadIDs(s.CurrentSlot()+1, [32]byte{} /* head root */)
// There exists proposer for next slot, but we haven't called fcu w/ payload attribute yet.
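
The new code performs the synchronous next-slot-cache and epoch-boundary cache updates only when slots.IsEpochEnd(slot) is true. A standalone sketch of that check, assuming the conventional definition (a slot ends an epoch when the next slot starts a new one); slotsPerEpoch = 32 is the mainnet value and an assumption of this sketch.

package main

import "fmt"

// slotsPerEpoch is mainnet's value; an assumption for this sketch only.
const slotsPerEpoch = 32

// isEpochEnd reports whether slot is the last slot of its epoch, i.e. the
// next slot begins a new epoch. This mirrors the role slots.IsEpochEnd plays
// in the hunk above, where the shuffling and proposer caches are updated
// synchronously only at the boundary.
func isEpochEnd(slot uint64) bool {
	return (slot+1)%slotsPerEpoch == 0
}

func main() {
	for _, s := range []uint64{30, 31, 32, 63} {
		fmt.Printf("slot %d ends an epoch: %v\n", s, isEpochEnd(s))
	}
}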

View File

@@ -539,8 +539,7 @@ func TestHandleEpochBoundary_UpdateFirstSlot(t *testing.T) {
s, _ := util.DeterministicGenesisState(t, 1024)
service.head = &head{state: s}
require.NoError(t, s.SetSlot(2*params.BeaconConfig().SlotsPerEpoch))
require.NoError(t, service.handleEpochBoundary(ctx, s, []byte{}))
require.Equal(t, 3*params.BeaconConfig().SlotsPerEpoch, service.nextEpochBoundarySlot)
require.NoError(t, service.handleEpochBoundary(ctx, s.Slot(), s, []byte{}))
}
func TestOnBlock_CanFinalize_WithOnTick(t *testing.T) {

View File

@@ -86,23 +86,30 @@ func (s *Service) spawnProcessAttestationsRoutine() {
}
log.Warn("Genesis time received, now available to process attestations")
}
// Wait for node to be synced before running the routine.
if err := s.waitForSync(); err != nil {
log.WithError(err).Error("Could not wait to sync")
return
}
st := slots.NewSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot)
pat := slots.NewSlotTickerWithOffset(s.genesisTime, -reorgLateBlockCountAttestations, params.BeaconConfig().SecondsPerSlot)
reorgInterval := time.Second*time.Duration(params.BeaconConfig().SecondsPerSlot) - reorgLateBlockCountAttestations
ticker := slots.NewSlotTickerWithIntervals(s.genesisTime, []time.Duration{0, reorgInterval})
for {
select {
case <-s.ctx.Done():
return
case <-pat.C():
s.UpdateHead(s.ctx, s.CurrentSlot()+1)
case <-st.C():
s.cfg.ForkChoiceStore.Lock()
if err := s.cfg.ForkChoiceStore.NewSlot(s.ctx, s.CurrentSlot()); err != nil {
log.WithError(err).Error("could not process new slot")
}
s.cfg.ForkChoiceStore.Unlock()
case slotInterval := <-ticker.C():
if slotInterval.Interval > 0 {
s.UpdateHead(s.ctx, slotInterval.Slot+1)
} else {
s.cfg.ForkChoiceStore.Lock()
if err := s.cfg.ForkChoiceStore.NewSlot(s.ctx, slotInterval.Slot); err != nil {
log.WithError(err).Error("could not process new slot")
}
s.cfg.ForkChoiceStore.Unlock()
s.UpdateHead(s.ctx, s.CurrentSlot())
s.UpdateHead(s.ctx, slotInterval.Slot)
}
}
}
}()
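
The routine above replaces two separate slot tickers with one interval ticker whose channel reports both the slot and which configured offset fired. The following is a toy stand-in for that pattern, not the prysm slots package; intervalTicker and slotInterval are hypothetical names used only for this sketch.

package main

import (
	"fmt"
	"time"
)

// slotInterval mirrors the value the interval ticker delivers in the diff:
// which slot fired and which configured offset within the slot fired.
type slotInterval struct {
	Slot     uint64
	Interval int
}

// intervalTicker is a toy stand-in for slots.NewSlotTickerWithIntervals:
// it ticks once per configured offset inside every slot.
func intervalTicker(genesis time.Time, slotDuration time.Duration, offsets []time.Duration) <-chan slotInterval {
	c := make(chan slotInterval)
	go func() {
		for slot := uint64(0); ; slot++ {
			slotStart := genesis.Add(time.Duration(slot) * slotDuration)
			for i, off := range offsets {
				time.Sleep(time.Until(slotStart.Add(off)))
				c <- slotInterval{Slot: slot, Interval: i}
			}
		}
	}()
	return c
}

func main() {
	// Short slots so the example finishes quickly.
	ticks := intervalTicker(time.Now(), 300*time.Millisecond, []time.Duration{0, 200 * time.Millisecond})
	for i := 0; i < 4; i++ {
		t := <-ticks
		if t.Interval > 0 {
			fmt.Printf("slot %d: late-slot work (e.g. UpdateHead for slot+1)\n", t.Slot)
		} else {
			fmt.Printf("slot %d: start-of-slot work (NewSlot, then UpdateHead)\n", t.Slot)
		}
	}
}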

View File

@@ -8,6 +8,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
coreTime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/features"
@@ -22,6 +23,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/time"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"go.opencensus.io/trace"
"golang.org/x/sync/errgroup"
)
// This defines how many epochs since finality the run time will begin to save hot state on to the DB.
@@ -60,19 +62,31 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
// Save current justified and finalized epochs for future use.
currStoreJustifiedEpoch := s.CurrentJustifiedCheckpt().Epoch
currStoreFinalizedEpoch := s.FinalizedCheckpt().Epoch
currentEpoch := coreTime.CurrentEpoch(preState)
preStateVersion, preStateHeader, err := getStateVersionAndPayload(preState)
if err != nil {
return err
}
postState, err := s.validateStateTransition(ctx, preState, blockCopy)
if err != nil {
return errors.Wrap(err, "failed to validate consensus state transition function")
}
isValidPayload, err := s.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, blockCopy, blockRoot)
if err != nil {
return errors.Wrap(err, "could not notify the engine of the new payload")
eg, _ := errgroup.WithContext(ctx)
var postState state.BeaconState
eg.Go(func() error {
postState, err = s.validateStateTransition(ctx, preState, blockCopy)
if err != nil {
return errors.Wrap(err, "failed to validate consensus state transition function")
}
return nil
})
var isValidPayload bool
eg.Go(func() error {
isValidPayload, err = s.validateExecutionOnBlock(ctx, preStateVersion, preStateHeader, blockCopy, blockRoot)
if err != nil {
return errors.Wrap(err, "could not notify the engine of the new payload")
}
return nil
})
if err := eg.Wait(); err != nil {
return err
}
// The rest of block processing takes a lock on forkchoice.
s.cfg.ForkChoiceStore.Lock()
@@ -86,7 +100,15 @@ func (s *Service) ReceiveBlock(ctx context.Context, block interfaces.ReadOnlySig
tracing.AnnotateError(span, err)
return err
}
if coreTime.CurrentEpoch(postState) > currentEpoch {
headSt, err := s.HeadState(ctx)
if err != nil {
return errors.Wrap(err, "could not get head state")
}
if err := reportEpochMetrics(ctx, postState, headSt); err != nil {
log.WithError(err).Error("could not report epoch metrics")
}
}
if err := s.updateJustificationOnBlock(ctx, preState, postState, currStoreJustifiedEpoch); err != nil {
return errors.Wrap(err, "could not update justified checkpoint")
}
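
The diff runs the consensus state transition and the execution-engine payload validation concurrently using golang.org/x/sync/errgroup. A self-contained sketch of that pattern follows; the two validate functions are hypothetical stand-ins for the service methods named in the hunk.

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/sync/errgroup"
)

// Toy stand-ins for the consensus and execution validations run in parallel above.
func validateStateTransition(ctx context.Context) (string, error) {
	time.Sleep(50 * time.Millisecond)
	return "post-state", nil
}

func validateExecutionOnBlock(ctx context.Context) (bool, error) {
	time.Sleep(50 * time.Millisecond)
	return true, nil
}

func main() {
	eg, ctx := errgroup.WithContext(context.Background())

	// Each goroutine writes to its own variable, so no extra locking is
	// needed; eg.Wait() provides the happens-before edge before the reads.
	var postState string
	eg.Go(func() error {
		s, err := validateStateTransition(ctx)
		if err != nil {
			return fmt.Errorf("failed to validate consensus state transition: %w", err)
		}
		postState = s
		return nil
	})

	var isValidPayload bool
	eg.Go(func() error {
		ok, err := validateExecutionOnBlock(ctx)
		if err != nil {
			return fmt.Errorf("could not notify the engine of the new payload: %w", err)
		}
		isValidPayload = ok
		return nil
	})

	if err := eg.Wait(); err != nil {
		fmt.Println("block rejected:", err)
		return
	}
	fmt.Println("both validations passed:", postState, isValidPayload)
}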

View File

@@ -35,7 +35,6 @@ import (
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
prysmTime "github.com/prysmaticlabs/prysm/v4/time"
@@ -46,22 +45,21 @@ import (
// Service represents a service that handles the internal
// logic of managing the full PoS beacon chain.
type Service struct {
cfg *config
ctx context.Context
cancel context.CancelFunc
genesisTime time.Time
head *head
headLock sync.RWMutex
originBlockRoot [32]byte // genesis root, or weak subjectivity checkpoint root, depending on how the node is initialized
nextEpochBoundarySlot primitives.Slot
boundaryRoots [][32]byte
checkpointStateCache *cache.CheckpointStateCache
initSyncBlocks map[[32]byte]interfaces.ReadOnlySignedBeaconBlock
initSyncBlocksLock sync.RWMutex
wsVerifier *WeakSubjectivityVerifier
clockSetter startup.ClockSetter
clockWaiter startup.ClockWaiter
syncComplete chan struct{}
cfg *config
ctx context.Context
cancel context.CancelFunc
genesisTime time.Time
head *head
headLock sync.RWMutex
originBlockRoot [32]byte // genesis root, or weak subjectivity checkpoint root, depending on how the node is initialized
boundaryRoots [][32]byte
checkpointStateCache *cache.CheckpointStateCache
initSyncBlocks map[[32]byte]interfaces.ReadOnlySignedBeaconBlock
initSyncBlocksLock sync.RWMutex
wsVerifier *WeakSubjectivityVerifier
clockSetter startup.ClockSetter
clockWaiter startup.ClockWaiter
syncComplete chan struct{}
}
// config options for the service.

View File

@@ -357,7 +357,7 @@ func TestChainService_SaveHeadNoDB(t *testing.T) {
require.NoError(t, s.cfg.StateGen.SaveState(ctx, r, newState))
wsb, err := consensusblocks.NewSignedBeaconBlock(blk)
require.NoError(t, err)
require.NoError(t, s.saveHeadNoDB(ctx, wsb, r, newState))
require.NoError(t, s.saveHeadNoDB(ctx, wsb, r, newState, false))
newB, err := s.cfg.BeaconDB.HeadBlock(ctx)
require.NoError(t, err)

View File

@@ -295,43 +295,39 @@ func ShuffledIndices(s state.ReadOnlyBeaconState, epoch primitives.Epoch) ([]pri
}
// UpdateCommitteeCache gets called at the beginning of every epoch to cache the committee shuffled indices
// list with committee index and epoch number. It caches the shuffled indices for current epoch and next epoch.
func UpdateCommitteeCache(ctx context.Context, state state.ReadOnlyBeaconState, epoch primitives.Epoch) error {
for _, e := range []primitives.Epoch{epoch, epoch + 1} {
seed, err := Seed(state, e, params.BeaconConfig().DomainBeaconAttester)
if err != nil {
return err
}
if committeeCache.HasEntry(string(seed[:])) {
return nil
}
shuffledIndices, err := ShuffledIndices(state, e)
if err != nil {
return err
}
count := SlotCommitteeCount(uint64(len(shuffledIndices)))
// Store the sorted indices as well as shuffled indices. In current spec,
// sorted indices is required to retrieve proposer index. This is also
// used for failing verify signature fallback.
sortedIndices := make([]primitives.ValidatorIndex, len(shuffledIndices))
copy(sortedIndices, shuffledIndices)
sort.Slice(sortedIndices, func(i, j int) bool {
return sortedIndices[i] < sortedIndices[j]
})
if err := committeeCache.AddCommitteeShuffledList(ctx, &cache.Committees{
ShuffledIndices: shuffledIndices,
CommitteeCount: uint64(params.BeaconConfig().SlotsPerEpoch.Mul(count)),
Seed: seed,
SortedIndices: sortedIndices,
}); err != nil {
return err
}
// list with committee index and epoch number. It caches the shuffled indices for the input epoch.
func UpdateCommitteeCache(ctx context.Context, state state.ReadOnlyBeaconState, e primitives.Epoch) error {
seed, err := Seed(state, e, params.BeaconConfig().DomainBeaconAttester)
if err != nil {
return err
}
if committeeCache.HasEntry(string(seed[:])) {
return nil
}
shuffledIndices, err := ShuffledIndices(state, e)
if err != nil {
return err
}
count := SlotCommitteeCount(uint64(len(shuffledIndices)))
// Store the sorted indices as well as shuffled indices. In current spec,
// sorted indices is required to retrieve proposer index. This is also
// used for failing verify signature fallback.
sortedIndices := make([]primitives.ValidatorIndex, len(shuffledIndices))
copy(sortedIndices, shuffledIndices)
sort.Slice(sortedIndices, func(i, j int) bool {
return sortedIndices[i] < sortedIndices[j]
})
if err := committeeCache.AddCommitteeShuffledList(ctx, &cache.Committees{
ShuffledIndices: shuffledIndices,
CommitteeCount: uint64(params.BeaconConfig().SlotsPerEpoch.Mul(count)),
Seed: seed,
SortedIndices: sortedIndices,
}); err != nil {
return err
}
return nil
}
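
UpdateCommitteeCache now fills the cache for exactly the epoch it is given; previously the loop over epoch and epoch+1 returned early when the first epoch's seed was already cached, so the second epoch could be skipped. A toy sketch of the new behavior; committeeCache, seedFor, and updateCommitteeCache are simplified stand-ins for the real helpers.

package main

import "fmt"

// A toy committee cache keyed by the shuffling seed of an epoch.
type committeeCache struct{ entries map[string]bool }

func (c *committeeCache) has(seed string) bool { return c.entries[seed] }
func (c *committeeCache) add(seed string)      { c.entries[seed] = true }

// seedFor is a hypothetical stand-in for helpers.Seed.
func seedFor(epoch uint64) string { return fmt.Sprintf("seed-%d", epoch) }

// updateCommitteeCache mirrors the reworked helper: it caches the shuffling
// for exactly the requested epoch and returns cheaply if it is already there.
func updateCommitteeCache(c *committeeCache, epoch uint64) {
	seed := seedFor(epoch)
	if c.has(seed) {
		return // already cached for this epoch's seed
	}
	// ... compute shuffled and sorted committee indices here ...
	c.add(seed)
}

func main() {
	c := &committeeCache{entries: map[string]bool{}}
	e := uint64(5)
	// Callers that need both epochs now make the two calls explicitly,
	// as the epoch-boundary code in the earlier hunk does (e, then e+1).
	updateCommitteeCache(c, e)
	updateCommitteeCache(c, e+1)
	fmt.Println(c.has(seedFor(e)), c.has(seedFor(e+1))) // true true
}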

View File

@@ -413,7 +413,7 @@ func TestUpdateCommitteeCache_CanUpdate(t *testing.T) {
require.NoError(t, err)
require.NoError(t, UpdateCommitteeCache(context.Background(), state, time.CurrentEpoch(state)))
epoch := primitives.Epoch(1)
epoch := primitives.Epoch(0)
idx := primitives.CommitteeIndex(1)
seed, err := Seed(state, epoch, params.BeaconConfig().DomainBeaconAttester)
require.NoError(t, err)
@@ -423,6 +423,40 @@ func TestUpdateCommitteeCache_CanUpdate(t *testing.T) {
assert.Equal(t, params.BeaconConfig().TargetCommitteeSize, uint64(len(indices)), "Did not save correct indices lengths")
}
func TestUpdateCommitteeCache_CanUpdateAcrossEpochs(t *testing.T) {
ClearCache()
defer ClearCache()
validatorCount := params.BeaconConfig().MinGenesisActiveValidatorCount
validators := make([]*ethpb.Validator, validatorCount)
indices := make([]primitives.ValidatorIndex, validatorCount)
for i := primitives.ValidatorIndex(0); uint64(i) < validatorCount; i++ {
validators[i] = &ethpb.Validator{
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
EffectiveBalance: 1,
}
indices[i] = i
}
state, err := state_native.InitializeFromProtoPhase0(&ethpb.BeaconState{
Validators: validators,
RandaoMixes: make([][]byte, params.BeaconConfig().EpochsPerHistoricalVector),
})
require.NoError(t, err)
e := time.CurrentEpoch(state)
require.NoError(t, UpdateCommitteeCache(context.Background(), state, e))
seed, err := Seed(state, e, params.BeaconConfig().DomainBeaconAttester)
require.NoError(t, err)
require.Equal(t, true, committeeCache.HasEntry(string(seed[:])))
nextSeed, err := Seed(state, e+1, params.BeaconConfig().DomainBeaconAttester)
require.NoError(t, err)
require.Equal(t, false, committeeCache.HasEntry(string(nextSeed[:])))
require.NoError(t, UpdateCommitteeCache(context.Background(), state, e+1))
require.Equal(t, true, committeeCache.HasEntry(string(nextSeed[:])))
}
func BenchmarkComputeCommittee300000_WithPreCache(b *testing.B) {
validators := make([]*ethpb.Validator, 300000)
for i := 0; i < len(validators); i++ {

View File

@@ -184,7 +184,7 @@ func innerShuffleList(input []primitives.ValidatorIndex, seed [32]byte, shuffle
for {
buf[seedSize] = r
ph := hashfunc(buf[:pivotViewSize])
pivot := bytesutil.FromBytes8(ph[:8]) % listSize
pivot := binary.LittleEndian.Uint64(ph[:8]) % listSize
mirror := (pivot + 1) >> 1
binary.LittleEndian.PutUint32(buf[pivotViewSize:], uint32(pivot>>8))
source := hashfunc(buf)
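
The pivot computation now calls the standard library's binary.LittleEndian.Uint64 on the first 8 bytes of the hash instead of bytesutil.FromBytes8; since the commit is labeled a minor optimization, the two should yield the same little-endian value. A minimal illustration of what that call computes:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// First 8 bytes of a hash digest, as in the shuffling pivot above.
	ph := []byte{0x0a, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0xff}

	// binary.LittleEndian.Uint64 treats ph[0] as the least significant
	// byte, so this yields 0x0100000000000a... i.e. 0x010000000000000a.
	pivotRaw := binary.LittleEndian.Uint64(ph[:8])

	listSize := uint64(1000)
	pivot := pivotRaw % listSize
	fmt.Printf("raw=%#x pivot=%d\n", pivotRaw, pivot)
}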

View File

@@ -158,8 +158,7 @@ func TestConfigureNetwork_ConfigFile(t *testing.T) {
return cmd.LoadFlagsFromConfig(cliCtx, comFlags)
},
Action: func(cliCtx *cli.Context) error {
//TODO: https://github.com/urfave/cli/issues/1197 right now does not set flag
require.Equal(t, false, cliCtx.IsSet(cmd.BootstrapNode.Name))
require.Equal(t, true, cliCtx.IsSet(cmd.BootstrapNode.Name))
require.Equal(t, strings.Join([]string{"node1", "node2"}, ","),
strings.Join(cliCtx.StringSlice(cmd.BootstrapNode.Name), ","))

View File

@@ -34,14 +34,17 @@ func (s *Service) prepareForkChoiceAtts() {
ticker := slots.NewSlotTickerWithIntervals(time.Unix(int64(s.genesisTime), 0), intervals[:])
for {
select {
case <-ticker.C():
case slotInterval := <-ticker.C():
t := time.Now()
if err := s.batchForkChoiceAtts(s.ctx); err != nil {
log.WithError(err).Error("Could not prepare attestations for fork choice")
}
if slots.TimeIntoSlot(s.genesisTime) < intervals[1] {
batchForkChoiceAttsT1.Observe(float64(time.Since(t).Milliseconds()))
} else if slots.TimeIntoSlot(s.genesisTime) < intervals[2] {
switch slotInterval.Interval {
case 0:
duration := time.Since(t)
log.WithField("Duration", duration).Debug("aggregated unaggregated attestations")
batchForkChoiceAttsT1.Observe(float64(duration.Milliseconds()))
case 1:
batchForkChoiceAttsT2.Observe(float64(time.Since(t).Milliseconds()))
}
case <-s.ctx.Done():
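
With the interval ticker delivering slotInterval.Interval directly, the histogram to observe is chosen by switching on the interval index rather than re-checking slots.TimeIntoSlot. A hedged sketch of that metric pattern with client_golang; the histogram names below are illustrative, not the repository's.

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

var (
	// Per-interval latency histograms, analogous to batchForkChoiceAttsT1/T2.
	attsT1 = prometheus.NewHistogram(prometheus.HistogramOpts{
		Name: "batch_fork_choice_atts_t1_milliseconds",
		Help: "Latency of the first batching pass within a slot.",
	})
	attsT2 = prometheus.NewHistogram(prometheus.HistogramOpts{
		Name: "batch_fork_choice_atts_t2_milliseconds",
		Help: "Latency of the second batching pass within a slot.",
	})
)

// observeBatch records elapsed time under the histogram matching the
// interval index delivered by the ticker.
func observeBatch(interval int, start time.Time) {
	elapsed := float64(time.Since(start).Milliseconds())
	switch interval {
	case 0:
		attsT1.Observe(elapsed)
	case 1:
		attsT2.Observe(elapsed)
	}
}

func main() {
	prometheus.MustRegister(attsT1, attsT2)
	start := time.Now()
	time.Sleep(20 * time.Millisecond) // stand-in for batchForkChoiceAtts
	observeBatch(0, start)
	fmt.Println("recorded first-interval batch latency")
}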

View File

@@ -24,6 +24,7 @@ go_library(
"//beacon-chain/operations/synccommittee:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/rpc/core:go_default_library",
"//beacon-chain/rpc/eth/beacon:go_default_library",
"//beacon-chain/rpc/eth/builder:go_default_library",
"//beacon-chain/rpc/eth/debug:go_default_library",

View File

@@ -16,14 +16,14 @@ go_library(
"//api/gateway/apimiddleware:go_default_library",
"//api/grpc:go_default_library",
"//beacon-chain/rpc/eth/events:go_default_library",
"//beacon-chain/rpc/eth/helpers:go_default_library",
"//beacon-chain/rpc/eth/shared:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//proto/eth/v2:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_r3labs_sse//:go_default_library",
"@com_github_r3labs_sse_v2//:go_default_library",
],
)
@@ -46,6 +46,6 @@ go_test(
"//testing/require:go_default_library",
"//time/slots:go_default_library",
"@com_github_gogo_protobuf//types:go_default_library",
"@com_github_r3labs_sse//:go_default_library",
"@com_github_r3labs_sse_v2//:go_default_library",
],
)

View File

@@ -17,7 +17,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/api/grpc"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/events"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/r3labs/sse"
"github.com/r3labs/sse/v2"
)
// match a number with optional decimals

View File

@@ -17,7 +17,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/events"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/r3labs/sse"
"github.com/r3labs/sse/v2"
)
type testSSZResponseJson struct {

View File

@@ -130,72 +130,6 @@ func wrapValidatorIndicesArray(
return true, nil
}
// https://ethereum.github.io/beacon-apis/#/Validator/publishAggregateAndProofs expects posting a top-level array.
// We make it more proto-friendly by wrapping it in a struct with a 'data' field.
func wrapSignedAggregateAndProofArray(
endpoint *apimiddleware.Endpoint,
_ http.ResponseWriter,
req *http.Request,
) (apimiddleware.RunDefault, apimiddleware.ErrorJson) {
if _, ok := endpoint.PostRequest.(*SubmitAggregateAndProofsRequestJson); ok {
data := make([]*SignedAggregateAttestationAndProofJson, 0)
if err := json.NewDecoder(req.Body).Decode(&data); err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not decode body")
}
j := &SubmitAggregateAndProofsRequestJson{Data: data}
b, err := json.Marshal(j)
if err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal wrapped body")
}
req.Body = io.NopCloser(bytes.NewReader(b))
}
return true, nil
}
// https://ethereum.github.io/beacon-apis/#/Validator/prepareBeaconCommitteeSubnet expects posting a top-level array.
// We make it more proto-friendly by wrapping it in a struct with a 'data' field.
func wrapBeaconCommitteeSubscriptionsArray(
endpoint *apimiddleware.Endpoint,
_ http.ResponseWriter,
req *http.Request,
) (apimiddleware.RunDefault, apimiddleware.ErrorJson) {
if _, ok := endpoint.PostRequest.(*SubmitBeaconCommitteeSubscriptionsRequestJson); ok {
data := make([]*BeaconCommitteeSubscribeJson, 0)
if err := json.NewDecoder(req.Body).Decode(&data); err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not decode body")
}
j := &SubmitBeaconCommitteeSubscriptionsRequestJson{Data: data}
b, err := json.Marshal(j)
if err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal wrapped body")
}
req.Body = io.NopCloser(bytes.NewReader(b))
}
return true, nil
}
// https://ethereum.github.io/beacon-APIs/#/Validator/prepareSyncCommitteeSubnets expects posting a top-level array.
// We make it more proto-friendly by wrapping it in a struct with a 'data' field.
func wrapSyncCommitteeSubscriptionsArray(
endpoint *apimiddleware.Endpoint,
_ http.ResponseWriter,
req *http.Request,
) (apimiddleware.RunDefault, apimiddleware.ErrorJson) {
if _, ok := endpoint.PostRequest.(*SubmitSyncCommitteeSubscriptionRequestJson); ok {
data := make([]*SyncCommitteeSubscriptionJson, 0)
if err := json.NewDecoder(req.Body).Decode(&data); err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not decode body")
}
j := &SubmitSyncCommitteeSubscriptionRequestJson{Data: data}
b, err := json.Marshal(j)
if err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal wrapped body")
}
req.Body = io.NopCloser(bytes.NewReader(b))
}
return true, nil
}
// https://ethereum.github.io/beacon-APIs/#/Beacon/submitPoolSyncCommitteeSignatures expects posting a top-level array.
// We make it more proto-friendly by wrapping it in a struct with a 'data' field.
func wrapSyncCommitteeSignaturesArray(
@@ -218,28 +152,6 @@ func wrapSyncCommitteeSignaturesArray(
return true, nil
}
// https://ethereum.github.io/beacon-APIs/#/Validator/publishContributionAndProofs expects posting a top-level array.
// We make it more proto-friendly by wrapping it in a struct with a 'data' field.
func wrapSignedContributionAndProofsArray(
endpoint *apimiddleware.Endpoint,
_ http.ResponseWriter,
req *http.Request,
) (apimiddleware.RunDefault, apimiddleware.ErrorJson) {
if _, ok := endpoint.PostRequest.(*SubmitContributionAndProofsRequestJson); ok {
data := make([]*SignedContributionAndProofJson, 0)
if err := json.NewDecoder(req.Body).Decode(&data); err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not decode body")
}
j := &SubmitContributionAndProofsRequestJson{Data: data}
b, err := json.Marshal(j)
if err != nil {
return false, apimiddleware.InternalServerErrorWithMessage(err, "could not marshal wrapped body")
}
req.Body = io.NopCloser(bytes.NewReader(b))
}
return true, nil
}
type phase0PublishBlockRequestJson struct {
Phase0Block *BeaconBlockJson `json:"phase0_block"`
Signature string `json:"signature" hex:"true"`

View File

@@ -140,151 +140,6 @@ func TestWrapBLSChangesArray(t *testing.T) {
})
}
func TestWrapSignedAggregateAndProofArray(t *testing.T) {
t.Run("ok", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
PostRequest: &SubmitAggregateAndProofsRequestJson{},
}
unwrappedAggs := []*SignedAggregateAttestationAndProofJson{{Signature: "sig"}}
unwrappedAggsJson, err := json.Marshal(unwrappedAggs)
require.NoError(t, err)
var body bytes.Buffer
_, err = body.Write(unwrappedAggsJson)
require.NoError(t, err)
request := httptest.NewRequest("POST", "http://foo.example", &body)
runDefault, errJson := wrapSignedAggregateAndProofArray(endpoint, nil, request)
require.Equal(t, true, errJson == nil)
assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
wrappedAggs := &SubmitAggregateAndProofsRequestJson{}
require.NoError(t, json.NewDecoder(request.Body).Decode(wrappedAggs))
require.Equal(t, 1, len(wrappedAggs.Data), "wrong number of wrapped items")
assert.Equal(t, "sig", wrappedAggs.Data[0].Signature)
})
t.Run("invalid_body", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
PostRequest: &SubmitAggregateAndProofsRequestJson{},
}
var body bytes.Buffer
_, err := body.Write([]byte("invalid"))
require.NoError(t, err)
request := httptest.NewRequest("POST", "http://foo.example", &body)
runDefault, errJson := wrapSignedAggregateAndProofArray(endpoint, nil, request)
require.Equal(t, false, errJson == nil)
assert.Equal(t, apimiddleware.RunDefault(false), runDefault)
assert.Equal(t, true, strings.Contains(errJson.Msg(), "could not decode body"))
assert.Equal(t, http.StatusInternalServerError, errJson.StatusCode())
})
}
func TestWrapBeaconCommitteeSubscriptionsArray(t *testing.T) {
t.Run("ok", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
PostRequest: &SubmitBeaconCommitteeSubscriptionsRequestJson{},
}
unwrappedSubs := []*BeaconCommitteeSubscribeJson{{
ValidatorIndex: "1",
CommitteeIndex: "1",
CommitteesAtSlot: "1",
Slot: "1",
IsAggregator: true,
}}
unwrappedSubsJson, err := json.Marshal(unwrappedSubs)
require.NoError(t, err)
var body bytes.Buffer
_, err = body.Write(unwrappedSubsJson)
require.NoError(t, err)
request := httptest.NewRequest("POST", "http://foo.example", &body)
runDefault, errJson := wrapBeaconCommitteeSubscriptionsArray(endpoint, nil, request)
require.Equal(t, true, errJson == nil)
assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
wrappedSubs := &SubmitBeaconCommitteeSubscriptionsRequestJson{}
require.NoError(t, json.NewDecoder(request.Body).Decode(wrappedSubs))
require.Equal(t, 1, len(wrappedSubs.Data), "wrong number of wrapped items")
assert.Equal(t, "1", wrappedSubs.Data[0].ValidatorIndex)
assert.Equal(t, "1", wrappedSubs.Data[0].CommitteeIndex)
assert.Equal(t, "1", wrappedSubs.Data[0].CommitteesAtSlot)
assert.Equal(t, "1", wrappedSubs.Data[0].Slot)
assert.Equal(t, true, wrappedSubs.Data[0].IsAggregator)
})
t.Run("invalid_body", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
PostRequest: &SubmitBeaconCommitteeSubscriptionsRequestJson{},
}
var body bytes.Buffer
_, err := body.Write([]byte("invalid"))
require.NoError(t, err)
request := httptest.NewRequest("POST", "http://foo.example", &body)
runDefault, errJson := wrapBeaconCommitteeSubscriptionsArray(endpoint, nil, request)
require.Equal(t, false, errJson == nil)
assert.Equal(t, apimiddleware.RunDefault(false), runDefault)
assert.Equal(t, true, strings.Contains(errJson.Msg(), "could not decode body"))
assert.Equal(t, http.StatusInternalServerError, errJson.StatusCode())
})
}
func TestWrapSyncCommitteeSubscriptionsArray(t *testing.T) {
t.Run("ok", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
PostRequest: &SubmitSyncCommitteeSubscriptionRequestJson{},
}
unwrappedSubs := []*SyncCommitteeSubscriptionJson{
{
ValidatorIndex: "1",
SyncCommitteeIndices: []string{"1", "2"},
UntilEpoch: "1",
},
{
ValidatorIndex: "2",
SyncCommitteeIndices: []string{"3", "4"},
UntilEpoch: "2",
},
}
unwrappedSubsJson, err := json.Marshal(unwrappedSubs)
require.NoError(t, err)
var body bytes.Buffer
_, err = body.Write(unwrappedSubsJson)
require.NoError(t, err)
request := httptest.NewRequest("POST", "http://foo.example", &body)
runDefault, errJson := wrapSyncCommitteeSubscriptionsArray(endpoint, nil, request)
require.Equal(t, true, errJson == nil)
assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
wrappedSubs := &SubmitSyncCommitteeSubscriptionRequestJson{}
require.NoError(t, json.NewDecoder(request.Body).Decode(wrappedSubs))
require.Equal(t, 2, len(wrappedSubs.Data), "wrong number of wrapped items")
assert.Equal(t, "1", wrappedSubs.Data[0].ValidatorIndex)
require.Equal(t, 2, len(wrappedSubs.Data[0].SyncCommitteeIndices), "wrong number of committee indices")
assert.Equal(t, "1", wrappedSubs.Data[0].SyncCommitteeIndices[0])
assert.Equal(t, "2", wrappedSubs.Data[0].SyncCommitteeIndices[1])
assert.Equal(t, "1", wrappedSubs.Data[0].UntilEpoch)
})
t.Run("invalid_body", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
PostRequest: &SubmitSyncCommitteeSubscriptionRequestJson{},
}
var body bytes.Buffer
_, err := body.Write([]byte("invalid"))
require.NoError(t, err)
request := httptest.NewRequest("POST", "http://foo.example", &body)
runDefault, errJson := wrapSyncCommitteeSubscriptionsArray(endpoint, nil, request)
require.Equal(t, false, errJson == nil)
assert.Equal(t, apimiddleware.RunDefault(false), runDefault)
assert.Equal(t, true, strings.Contains(errJson.Msg(), "could not decode body"))
assert.Equal(t, http.StatusInternalServerError, errJson.StatusCode())
})
}
func TestWrapSyncCommitteeSignaturesArray(t *testing.T) {
t.Run("ok", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
@@ -333,75 +188,6 @@ func TestWrapSyncCommitteeSignaturesArray(t *testing.T) {
})
}
func TestWrapSignedContributionAndProofsArray(t *testing.T) {
t.Run("ok", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
PostRequest: &SubmitContributionAndProofsRequestJson{},
}
unwrapped := []*SignedContributionAndProofJson{
{
Message: &ContributionAndProofJson{
AggregatorIndex: "1",
Contribution: &SyncCommitteeContributionJson{
Slot: "1",
BeaconBlockRoot: "root",
SubcommitteeIndex: "1",
AggregationBits: "bits",
Signature: "sig",
},
SelectionProof: "proof",
},
Signature: "sig",
},
{
Message: &ContributionAndProofJson{},
Signature: "sig",
},
}
unwrappedJson, err := json.Marshal(unwrapped)
require.NoError(t, err)
var body bytes.Buffer
_, err = body.Write(unwrappedJson)
require.NoError(t, err)
request := httptest.NewRequest("POST", "http://foo.example", &body)
runDefault, errJson := wrapSignedContributionAndProofsArray(endpoint, nil, request)
require.Equal(t, true, errJson == nil)
assert.Equal(t, apimiddleware.RunDefault(true), runDefault)
wrapped := &SubmitContributionAndProofsRequestJson{}
require.NoError(t, json.NewDecoder(request.Body).Decode(wrapped))
require.Equal(t, 2, len(wrapped.Data), "wrong number of wrapped items")
assert.Equal(t, "sig", wrapped.Data[0].Signature)
require.NotNil(t, wrapped.Data[0].Message)
msg := wrapped.Data[0].Message
assert.Equal(t, "1", msg.AggregatorIndex)
assert.Equal(t, "proof", msg.SelectionProof)
require.NotNil(t, msg.Contribution)
assert.Equal(t, "1", msg.Contribution.Slot)
assert.Equal(t, "root", msg.Contribution.BeaconBlockRoot)
assert.Equal(t, "1", msg.Contribution.SubcommitteeIndex)
assert.Equal(t, "bits", msg.Contribution.AggregationBits)
assert.Equal(t, "sig", msg.Contribution.Signature)
})
t.Run("invalid_body", func(t *testing.T) {
endpoint := &apimiddleware.Endpoint{
PostRequest: &SubmitContributionAndProofsRequestJson{},
}
var body bytes.Buffer
_, err := body.Write([]byte("invalid"))
require.NoError(t, err)
request := httptest.NewRequest("POST", "http://foo.example", &body)
runDefault, errJson := wrapSignedContributionAndProofsArray(endpoint, nil, request)
require.Equal(t, false, errJson == nil)
assert.Equal(t, apimiddleware.RunDefault(false), runDefault)
assert.Equal(t, true, strings.Contains(errJson.Msg(), "could not decode body"))
assert.Equal(t, http.StatusInternalServerError, errJson.StatusCode())
})
}
func TestSetInitialPublishBlockPostRequest(t *testing.T) {
params.SetupTestConfigCleanup(t)
cfg := params.BeaconConfig().Copy()

View File

@@ -48,7 +48,6 @@ func (_ *BeaconEndpointFactory) Paths() []string {
"/eth/v1/node/peers/{peer_id}",
"/eth/v1/node/peer_count",
"/eth/v1/node/version",
"/eth/v1/node/syncing",
"/eth/v1/node/health",
"/eth/v1/debug/beacon/states/{state_id}",
"/eth/v2/debug/beacon/states/{state_id}",
@@ -65,13 +64,7 @@ func (_ *BeaconEndpointFactory) Paths() []string {
"/eth/v1/validator/blocks/{slot}",
"/eth/v2/validator/blocks/{slot}",
"/eth/v1/validator/blinded_blocks/{slot}",
"/eth/v1/validator/attestation_data",
"/eth/v1/validator/aggregate_attestation",
"/eth/v1/validator/beacon_committee_subscriptions",
"/eth/v1/validator/sync_committee_subscriptions",
"/eth/v1/validator/aggregate_and_proofs",
"/eth/v1/validator/sync_committee_contribution",
"/eth/v1/validator/contribution_and_proofs",
"/eth/v1/validator/prepare_beacon_proposer",
"/eth/v1/validator/register_validator",
"/eth/v1/validator/liveness/{epoch}",
@@ -190,8 +183,6 @@ func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, er
endpoint.GetResponse = &PeerCountResponseJson{}
case "/eth/v1/node/version":
endpoint.GetResponse = &VersionResponseJson{}
case "/eth/v1/node/syncing":
endpoint.GetResponse = &SyncingResponseJson{}
case "/eth/v1/node/health":
// Use default endpoint
case "/eth/v1/debug/beacon/states/{state_id}":
@@ -260,37 +251,9 @@ func (_ *BeaconEndpointFactory) Create(path string) (*apimiddleware.Endpoint, er
OnPreSerializeMiddlewareResponseIntoJson: serializeProducedBlindedBlock,
}
endpoint.CustomHandlers = []apimiddleware.CustomHandler{handleProduceBlindedBlockSSZ}
case "/eth/v1/validator/attestation_data":
endpoint.GetResponse = &ProduceAttestationDataResponseJson{}
endpoint.RequestQueryParams = []apimiddleware.QueryParam{{Name: "slot"}, {Name: "committee_index"}}
case "/eth/v1/validator/aggregate_attestation":
endpoint.GetResponse = &AggregateAttestationResponseJson{}
endpoint.RequestQueryParams = []apimiddleware.QueryParam{{Name: "attestation_data_root", Hex: true}, {Name: "slot"}}
case "/eth/v1/validator/beacon_committee_subscriptions":
endpoint.PostRequest = &SubmitBeaconCommitteeSubscriptionsRequestJson{}
endpoint.Err = &NodeSyncDetailsErrorJson{}
endpoint.Hooks = apimiddleware.HookCollection{
OnPreDeserializeRequestBodyIntoContainer: wrapBeaconCommitteeSubscriptionsArray,
}
case "/eth/v1/validator/sync_committee_subscriptions":
endpoint.PostRequest = &SubmitSyncCommitteeSubscriptionRequestJson{}
endpoint.Err = &NodeSyncDetailsErrorJson{}
endpoint.Hooks = apimiddleware.HookCollection{
OnPreDeserializeRequestBodyIntoContainer: wrapSyncCommitteeSubscriptionsArray,
}
case "/eth/v1/validator/aggregate_and_proofs":
endpoint.PostRequest = &SubmitAggregateAndProofsRequestJson{}
endpoint.Hooks = apimiddleware.HookCollection{
OnPreDeserializeRequestBodyIntoContainer: wrapSignedAggregateAndProofArray,
}
case "/eth/v1/validator/sync_committee_contribution":
endpoint.GetResponse = &ProduceSyncCommitteeContributionResponseJson{}
endpoint.RequestQueryParams = []apimiddleware.QueryParam{{Name: "slot"}, {Name: "subcommittee_index"}, {Name: "beacon_block_root", Hex: true}}
case "/eth/v1/validator/contribution_and_proofs":
endpoint.PostRequest = &SubmitContributionAndProofsRequestJson{}
endpoint.Hooks = apimiddleware.HookCollection{
OnPreDeserializeRequestBodyIntoContainer: wrapSignedContributionAndProofsArray,
}
case "/eth/v1/validator/prepare_beacon_proposer":
endpoint.PostRequest = &FeeRecipientsRequestJSON{}
endpoint.Hooks = apimiddleware.HookCollection{

View File

@@ -4,7 +4,7 @@ import (
"strings"
"github.com/prysmaticlabs/prysm/v4/api/gateway/apimiddleware"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
)
@@ -199,7 +199,7 @@ type VersionResponseJson struct {
}
type SyncingResponseJson struct {
Data *helpers.SyncDetailsJson `json:"data"`
Data *shared.SyncDetails `json:"data"`
}
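For illustration, here is a hedged sketch of the JSON body that the updated SyncingResponseJson (now wrapping shared.SyncDetails) is expected to produce. The shared.SyncDetails definition is not shown in this diff, so the field names below are assumptions based on the standard Beacon API /eth/v1/node/syncing response rather than code from this change.

// Illustrative only: field names are assumed from the standard Beacon API,
// not copied from shared.SyncDetails.
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	resp := map[string]any{
		"data": map[string]any{
			"head_slot":     "7268352", // example value
			"sync_distance": "0",
			"is_syncing":    false,
			"is_optimistic": false,
		},
	}
	b, _ := json.Marshal(resp)
	fmt.Println(string(b))
}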
type BeaconStateResponseJson struct {
@@ -268,18 +268,10 @@ type ProduceBlindedBlockResponseJson struct {
Data *BlindedBeaconBlockContainerJson `json:"data"`
}
type ProduceAttestationDataResponseJson struct {
Data *AttestationDataJson `json:"data"`
}
type AggregateAttestationResponseJson struct {
Data *AttestationJson `json:"data"`
}
type SubmitBeaconCommitteeSubscriptionsRequestJson struct {
Data []*BeaconCommitteeSubscribeJson `json:"data"`
}
type BeaconCommitteeSubscribeJson struct {
ValidatorIndex string `json:"validator_index"`
CommitteeIndex string `json:"committee_index"`
@@ -288,28 +280,10 @@ type BeaconCommitteeSubscribeJson struct {
IsAggregator bool `json:"is_aggregator"`
}
type SubmitSyncCommitteeSubscriptionRequestJson struct {
Data []*SyncCommitteeSubscriptionJson `json:"data"`
}
type SyncCommitteeSubscriptionJson struct {
ValidatorIndex string `json:"validator_index"`
SyncCommitteeIndices []string `json:"sync_committee_indices"`
UntilEpoch string `json:"until_epoch"`
}
type SubmitAggregateAndProofsRequestJson struct {
Data []*SignedAggregateAttestationAndProofJson `json:"data"`
}
type ProduceSyncCommitteeContributionResponseJson struct {
Data *SyncCommitteeContributionJson `json:"data"`
}
type SubmitContributionAndProofsRequestJson struct {
Data []*SignedContributionAndProofJson `json:"data"`
}
type ForkChoiceNodeResponseJson struct {
Slot string `json:"slot"`
BlockRoot string `json:"block_root" hex:"true"`
@@ -1219,7 +1193,7 @@ type SingleIndexedVerificationFailureJson struct {
type NodeSyncDetailsErrorJson struct {
apimiddleware.DefaultErrorJson
SyncDetails helpers.SyncDetailsJson `json:"sync_details"`
SyncDetails shared.SyncDetails `json:"sync_details"`
}
type EventErrorJson struct {

View File

@@ -4,22 +4,39 @@ go_library(
name = "go_default_library",
srcs = [
"errors.go",
"log.go",
"service.go",
"validator.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/core/altair:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/operation:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/time:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/operations/synccommittee:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/sync:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/validator:go_default_library",
"//crypto/rand:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//runtime/version:go_default_library",
"//time:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@org_golang_google_grpc//codes:go_default_library",
"@org_golang_x_sync//errgroup:go_default_library",
],
)

View File

@@ -0,0 +1,5 @@
package core
import "github.com/sirupsen/logrus"
var log = logrus.WithField("prefix", "rpc/core")

View File

@@ -0,0 +1,22 @@
package core
import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
opfeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/operation"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/synccommittee"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync"
)
type Service struct {
HeadFetcher blockchain.HeadFetcher
GenesisTimeFetcher blockchain.TimeFetcher
SyncChecker sync.Checker
Broadcaster p2p.Broadcaster
SyncCommitteePool synccommittee.Pool
OperationNotifier opfeed.Notifier
AttestationCache *cache.AttestationCache
StateGen stategen.StateManager
}
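The new core.Service bundles the dependencies shared by the gRPC and HTTP validator/beacon handlers. Below is a minimal wiring sketch, not taken from this change: the caller passes already-constructed node services (all parameters here are placeholders), and handlers can then delegate to the same core methods.

// Minimal wiring sketch under assumed setup code; the parameter values are
// supplied by the node at startup and are placeholders in this example.
func newCoreService(
	chain blockchain.HeadFetcher,
	clock blockchain.TimeFetcher,
	syncChecker sync.Checker,
	broadcaster p2p.Broadcaster,
	pool synccommittee.Pool,
	notifier opfeed.Notifier,
	attCache *cache.AttestationCache,
	sg stategen.StateManager,
) *core.Service {
	return &core.Service{
		HeadFetcher:        chain,
		GenesisTimeFetcher: clock,
		SyncChecker:        syncChecker,
		Broadcaster:        broadcaster,
		SyncCommitteePool:  pool,
		OperationNotifier:  notifier,
		AttestationCache:   attCache,
		StateGen:           sg,
	}
}

A handler can then call, for example, perf, rpcErr := svc.ComputeValidatorPerformance(ctx, req) and map rpcErr.Reason (Internal, BadRequest, Unavailable) to the transport-specific status code.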

View File

@@ -1,34 +1,70 @@
package core
import (
"bytes"
"context"
"fmt"
"sort"
"time"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/altair"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/epoch/precompute"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
opfeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/operation"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
coreTime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/consensus-types/validator"
"github.com/prysmaticlabs/prysm/v4/crypto/rand"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
prysmTime "github.com/prysmaticlabs/prysm/v4/time"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
)
func ComputeValidatorPerformance(
// AggregateBroadcastFailedError represents an error scenario where
// broadcasting an aggregate selection proof failed.
type AggregateBroadcastFailedError struct {
err error
}
// NewAggregateBroadcastFailedError creates a new error instance.
func NewAggregateBroadcastFailedError(err error) AggregateBroadcastFailedError {
return AggregateBroadcastFailedError{
err: err,
}
}
// Error returns the underlying error message.
func (e *AggregateBroadcastFailedError) Error() string {
return fmt.Sprintf("could not broadcast signed aggregated attestation: %s", e.err.Error())
}
// ComputeValidatorPerformance reports the validator's latest balance along with other important metrics on
// rewards and penalties throughout its lifecycle in the beacon chain.
func (s *Service) ComputeValidatorPerformance(
ctx context.Context,
req *ethpb.ValidatorPerformanceRequest,
headFetcher blockchain.HeadFetcher,
currSlot primitives.Slot,
) (*ethpb.ValidatorPerformanceResponse, *RpcError) {
headState, err := headFetcher.HeadState(ctx)
if s.SyncChecker.Syncing() {
return nil, &RpcError{Reason: Unavailable, Err: errors.New("Syncing to latest head, not ready to respond")}
}
headState, err := s.HeadFetcher.HeadState(ctx)
if err != nil {
return nil, &RpcError{Err: errors.Wrap(err, "could not get head state"), Reason: Internal}
}
currSlot := s.GenesisTimeFetcher.CurrentSlot()
if currSlot > headState.Slot() {
headRoot, err := headFetcher.HeadRoot(ctx)
headRoot, err := s.HeadFetcher.HeadRoot(ctx)
if err != nil {
return nil, &RpcError{Err: errors.Wrap(err, "could not get head root"), Reason: Internal}
}
@@ -166,3 +202,219 @@ func ComputeValidatorPerformance(
InactivityScores: inactivityScores, // Only populated in Altair
}, nil
}
// SubmitSignedContributionAndProof is called by a sync committee aggregator
// to submit a signed contribution and proof object.
func (s *Service) SubmitSignedContributionAndProof(
ctx context.Context,
req *ethpb.SignedContributionAndProof,
) *RpcError {
errs, ctx := errgroup.WithContext(ctx)
// Broadcast and save the contribution into the pool in parallel, since a failure in one should not affect the other.
errs.Go(func() error {
return s.Broadcaster.Broadcast(ctx, req)
})
if err := s.SyncCommitteePool.SaveSyncCommitteeContribution(req.Message.Contribution); err != nil {
return &RpcError{Err: err, Reason: Internal}
}
// Wait for p2p broadcast to complete and return the first error (if any)
err := errs.Wait()
if err != nil {
return &RpcError{Err: err, Reason: Internal}
}
s.OperationNotifier.OperationFeed().Send(&feed.Event{
Type: opfeed.SyncCommitteeContributionReceived,
Data: &opfeed.SyncCommitteeContributionReceivedData{
Contribution: req,
},
})
return nil
}
// SubmitSignedAggregateSelectionProof verifies given aggregate and proofs and publishes them on appropriate gossipsub topic.
func (s *Service) SubmitSignedAggregateSelectionProof(
ctx context.Context,
req *ethpb.SignedAggregateSubmitRequest,
) *RpcError {
if req.SignedAggregateAndProof == nil || req.SignedAggregateAndProof.Message == nil ||
req.SignedAggregateAndProof.Message.Aggregate == nil || req.SignedAggregateAndProof.Message.Aggregate.Data == nil {
return &RpcError{Err: errors.New("signed aggregate request can't be nil"), Reason: BadRequest}
}
emptySig := make([]byte, fieldparams.BLSSignatureLength)
if bytes.Equal(req.SignedAggregateAndProof.Signature, emptySig) ||
bytes.Equal(req.SignedAggregateAndProof.Message.SelectionProof, emptySig) {
return &RpcError{Err: errors.New("signed signatures can't be zero hashes"), Reason: BadRequest}
}
// As a preventive measure, a beacon node shouldn't broadcast an attestation whose slot is out of range.
if err := helpers.ValidateAttestationTime(req.SignedAggregateAndProof.Message.Aggregate.Data.Slot,
s.GenesisTimeFetcher.GenesisTime(), params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
return &RpcError{Err: errors.New("attestation slot is no longer valid from current time"), Reason: BadRequest}
}
if err := s.Broadcaster.Broadcast(ctx, req.SignedAggregateAndProof); err != nil {
return &RpcError{Err: &AggregateBroadcastFailedError{err: err}, Reason: Internal}
}
log.WithFields(logrus.Fields{
"slot": req.SignedAggregateAndProof.Message.Aggregate.Data.Slot,
"committeeIndex": req.SignedAggregateAndProof.Message.Aggregate.Data.CommitteeIndex,
"validatorIndex": req.SignedAggregateAndProof.Message.AggregatorIndex,
"aggregatedCount": req.SignedAggregateAndProof.Message.Aggregate.AggregationBits.Count(),
}).Debug("Broadcasting aggregated attestation and proof")
return nil
}
// AssignValidatorToSubnet checks the status and pubkey of a particular validator
// to discern whether persistent subnets need to be registered for them.
func AssignValidatorToSubnet(pubkey []byte, status validator.ValidatorStatus) {
if status != validator.Active {
return
}
assignValidatorToSubnet(pubkey)
}
// AssignValidatorToSubnetProto checks the status and pubkey of a particular validator
// to discern whether persistent subnets need to be registered for them.
//
// It has a Proto suffix because the status is a protobuf type.
func AssignValidatorToSubnetProto(pubkey []byte, status ethpb.ValidatorStatus) {
if status != ethpb.ValidatorStatus_ACTIVE && status != ethpb.ValidatorStatus_EXITING {
return
}
assignValidatorToSubnet(pubkey)
}
func assignValidatorToSubnet(pubkey []byte) {
_, ok, expTime := cache.SubnetIDs.GetPersistentSubnets(pubkey)
if ok && expTime.After(prysmTime.Now()) {
return
}
epochDuration := time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
var assignedIdxs []uint64
randGen := rand.NewGenerator()
for i := uint64(0); i < params.BeaconConfig().RandomSubnetsPerValidator; i++ {
assignedIdx := randGen.Intn(int(params.BeaconNetworkConfig().AttestationSubnetCount))
assignedIdxs = append(assignedIdxs, uint64(assignedIdx))
}
assignedDuration := uint64(randGen.Intn(int(params.BeaconConfig().EpochsPerRandomSubnetSubscription)))
assignedDuration += params.BeaconConfig().EpochsPerRandomSubnetSubscription
totalDuration := epochDuration * time.Duration(assignedDuration)
cache.SubnetIDs.AddPersistentCommittee(pubkey, assignedIdxs, totalDuration*time.Second)
}
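The assignment above subscribes each active validator to RandomSubnetsPerValidator random attestation subnets for a random duration between EpochsPerRandomSubnetSubscription and twice that value. A small worked sketch of the duration arithmetic follows, using assumed mainnet-style values (SlotsPerEpoch=32, SecondsPerSlot=12, EpochsPerRandomSubnetSubscription=256); the numbers are illustrative assumptions, not read from params here.

// Worked example of the subscription-duration arithmetic above, with assumed
// mainnet-style constants instead of the params.BeaconConfig() values.
package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	const (
		slotsPerEpoch                     = 32  // assumed
		secondsPerSlot                    = 12  // assumed
		epochsPerRandomSubnetSubscription = 256 // assumed
	)
	// Seconds per epoch as a raw count (384); it only becomes a real duration
	// after the final multiplication by time.Second, mirroring the code above.
	epochDuration := time.Duration(slotsPerEpoch * secondsPerSlot)
	assigned := uint64(rand.Intn(epochsPerRandomSubnetSubscription)) + epochsPerRandomSubnetSubscription
	total := epochDuration * time.Duration(assigned) * time.Second
	// With assigned in [256, 512), total lands between roughly 27.3h and 54.6h.
	fmt.Println(total)
}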
// GetAttestationData requests that the beacon node produces attestation data for
// the requested committee index and slot based on the node's current head.
func (s *Service) GetAttestationData(
ctx context.Context, req *ethpb.AttestationDataRequest,
) (*ethpb.AttestationData, *RpcError) {
if err := helpers.ValidateAttestationTime(
req.Slot,
s.GenesisTimeFetcher.GenesisTime(),
params.BeaconNetworkConfig().MaximumGossipClockDisparity,
); err != nil {
return nil, &RpcError{Reason: BadRequest, Err: errors.Errorf("invalid request: %v", err)}
}
res, err := s.AttestationCache.Get(ctx, req)
if err != nil {
return nil, &RpcError{Reason: Internal, Err: errors.Errorf("could not retrieve data from attestation cache: %v", err)}
}
if res != nil {
res.CommitteeIndex = req.CommitteeIndex
return res, nil
}
if err := s.AttestationCache.MarkInProgress(req); err != nil {
if errors.Is(err, cache.ErrAlreadyInProgress) {
res, err := s.AttestationCache.Get(ctx, req)
if err != nil {
return nil, &RpcError{Reason: Internal, Err: errors.Errorf("could not retrieve data from attestation cache: %v", err)}
}
if res == nil {
return nil, &RpcError{Reason: Internal, Err: errors.New("a request was in progress and resolved to nil")}
}
res.CommitteeIndex = req.CommitteeIndex
return res, nil
}
return nil, &RpcError{Reason: Internal, Err: errors.Errorf("could not mark attestation as in-progress: %v", err)}
}
defer func() {
if err := s.AttestationCache.MarkNotInProgress(req); err != nil {
log.WithError(err).Error("could not mark attestation as not-in-progress")
}
}()
headState, err := s.HeadFetcher.HeadState(ctx)
if err != nil {
return nil, &RpcError{Reason: Internal, Err: errors.Errorf("could not retrieve head state: %v", err)}
}
headRoot, err := s.HeadFetcher.HeadRoot(ctx)
if err != nil {
return nil, &RpcError{Reason: Internal, Err: errors.Errorf("could not retrieve head root: %v", err)}
}
// Handle the case where we receive an attestation request after a newer state/block has been processed.
if headState.Slot() > req.Slot {
headRoot, err = helpers.BlockRootAtSlot(headState, req.Slot)
if err != nil {
return nil, &RpcError{Reason: Internal, Err: errors.Errorf("could not get historical head root: %v", err)}
}
headState, err = s.StateGen.StateByRoot(ctx, bytesutil.ToBytes32(headRoot))
if err != nil {
return nil, &RpcError{Reason: Internal, Err: errors.Errorf("could not get historical head state: %v", err)}
}
}
if headState == nil || headState.IsNil() {
return nil, &RpcError{Reason: Internal, Err: errors.New("could not lookup parent state from head")}
}
if coreTime.CurrentEpoch(headState) < slots.ToEpoch(req.Slot) {
headState, err = transition.ProcessSlotsUsingNextSlotCache(ctx, headState, headRoot, req.Slot)
if err != nil {
return nil, &RpcError{Reason: Internal, Err: errors.Errorf("could not process slots up to %d: %v", req.Slot, err)}
}
}
targetEpoch := coreTime.CurrentEpoch(headState)
epochStartSlot, err := slots.EpochStart(targetEpoch)
if err != nil {
return nil, &RpcError{Reason: Internal, Err: errors.Errorf("could not calculate epoch start: %v", err)}
}
var targetRoot []byte
if epochStartSlot == headState.Slot() {
targetRoot = headRoot
} else {
targetRoot, err = helpers.BlockRootAtSlot(headState, epochStartSlot)
if err != nil {
return nil, &RpcError{Reason: Internal, Err: errors.Errorf("could not get target block for slot %d: %v", epochStartSlot, err)}
}
if bytesutil.ToBytes32(targetRoot) == params.BeaconConfig().ZeroHash {
targetRoot = headRoot
}
}
res = &ethpb.AttestationData{
Slot: req.Slot,
CommitteeIndex: req.CommitteeIndex,
BeaconBlockRoot: headRoot,
Source: headState.CurrentJustifiedCheckpoint(),
Target: &ethpb.Checkpoint{
Epoch: targetEpoch,
Root: targetRoot,
},
}
if err := s.AttestationCache.Put(ctx, req, res); err != nil {
log.WithError(err).Error("could not store attestation data in cache")
}
return res, nil
}
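GetAttestationData backs the /eth/v1/validator/attestation_data route that this change removes from the gateway middleware in favour of a native handler. Below is a hedged client-side sketch of querying that route; the host and port (localhost:3500) and the response handling are assumptions for illustration, not part of this change.

// Illustrative HTTP call against the attestation_data route; host, port, and
// field handling are assumptions.
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	q := url.Values{}
	q.Set("slot", "12345")
	q.Set("committee_index", "3")
	resp, err := http.Get("http://localhost:3500/eth/v1/validator/attestation_data?" + q.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, string(body)) // JSON-wrapped AttestationData on success
}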

View File

@@ -37,6 +37,7 @@ go_library(
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/rpc/eth/helpers:go_default_library",
"//beacon-chain/rpc/eth/shared:go_default_library",
"//beacon-chain/rpc/lookup:go_default_library",
"//beacon-chain/rpc/prysm/v1alpha1/validator:go_default_library",
"//beacon-chain/state:go_default_library",
@@ -49,11 +50,12 @@ go_library(
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/validator:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//encoding/ssz/detect:go_default_library",
"//network:go_default_library",
"//network/forks:go_default_library",
"//network/http:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/eth/v2:go_default_library",
@@ -101,6 +103,7 @@ go_test(
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/blstoexec:go_default_library",
"//beacon-chain/operations/blstoexec/mock:go_default_library",
@@ -119,6 +122,7 @@ go_test(
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/validator:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/bls/common:go_default_library",
"//crypto/hash:go_default_library",

View File

@@ -11,10 +11,10 @@ import (
"github.com/go-playground/validator/v10"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/network"
http2 "github.com/prysmaticlabs/prysm/v4/network/http"
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
ethpbv2 "github.com/prysmaticlabs/prysm/v4/proto/eth/v2"
"github.com/prysmaticlabs/prysm/v4/proto/migration"
@@ -38,10 +38,10 @@ const (
// a `SignedBeaconBlock`. The broadcast behaviour may be adjusted via the `broadcast_validation`
// query parameter.
func (bs *Server) PublishBlindedBlockV2(w http.ResponseWriter, r *http.Request) {
if ok := bs.checkSync(r.Context(), w); !ok {
if shared.IsSyncing(r.Context(), w, bs.SyncChecker, bs.HeadFetcher, bs.TimeFetcher, bs.OptimisticModeFetcher) {
return
}
isSSZ, err := network.SszRequested(r)
isSSZ, err := http2.SszRequested(r)
if isSSZ && err == nil {
publishBlindedBlockV2SSZ(bs, w, r)
} else {
@@ -52,22 +52,22 @@ func (bs *Server) PublishBlindedBlockV2(w http.ResponseWriter, r *http.Request)
func publishBlindedBlockV2SSZ(bs *Server, w http.ResponseWriter, r *http.Request) {
body, err := io.ReadAll(r.Body)
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not read request body: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
capellaBlock := &ethpbv2.SignedBlindedBeaconBlockCapella{}
if err := capellaBlock.UnmarshalSSZ(body); err == nil {
v1block, err := migration.BlindedCapellaToV1Alpha1SignedBlock(capellaBlock)
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not decode request body into consensus block: " + err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
genericBlock := &eth.GenericSignedBeaconBlock{
@@ -76,11 +76,11 @@ func publishBlindedBlockV2SSZ(bs *Server, w http.ResponseWriter, r *http.Request
},
}
if err = bs.validateBroadcast(r, genericBlock); err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
bs.proposeBlock(r.Context(), w, genericBlock)
@@ -90,11 +90,11 @@ func publishBlindedBlockV2SSZ(bs *Server, w http.ResponseWriter, r *http.Request
if err := bellatrixBlock.UnmarshalSSZ(body); err == nil {
v1block, err := migration.BlindedBellatrixToV1Alpha1SignedBlock(bellatrixBlock)
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not decode request body into consensus block: " + err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
genericBlock := &eth.GenericSignedBeaconBlock{
@@ -103,11 +103,11 @@ func publishBlindedBlockV2SSZ(bs *Server, w http.ResponseWriter, r *http.Request
},
}
if err = bs.validateBroadcast(r, genericBlock); err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
bs.proposeBlock(r.Context(), w, genericBlock)
@@ -119,11 +119,11 @@ func publishBlindedBlockV2SSZ(bs *Server, w http.ResponseWriter, r *http.Request
if err := altairBlock.UnmarshalSSZ(body); err == nil {
v1block, err := migration.AltairToV1Alpha1SignedBlock(altairBlock)
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not decode request body into consensus block: " + err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
genericBlock := &eth.GenericSignedBeaconBlock{
@@ -132,11 +132,11 @@ func publishBlindedBlockV2SSZ(bs *Server, w http.ResponseWriter, r *http.Request
},
}
if err = bs.validateBroadcast(r, genericBlock); err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
bs.proposeBlock(r.Context(), w, genericBlock)
@@ -146,11 +146,11 @@ func publishBlindedBlockV2SSZ(bs *Server, w http.ResponseWriter, r *http.Request
if err := phase0Block.UnmarshalSSZ(body); err == nil {
v1block, err := migration.V1ToV1Alpha1SignedBlock(phase0Block)
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not decode request body into consensus block: " + err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
genericBlock := &eth.GenericSignedBeaconBlock{
@@ -159,32 +159,32 @@ func publishBlindedBlockV2SSZ(bs *Server, w http.ResponseWriter, r *http.Request
},
}
if err = bs.validateBroadcast(r, genericBlock); err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
bs.proposeBlock(r.Context(), w, genericBlock)
return
}
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Body does not represent a valid block type",
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
}
func publishBlindedBlockV2(bs *Server, w http.ResponseWriter, r *http.Request) {
validate := validator.New()
body, err := io.ReadAll(r.Body)
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not read request body",
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
@@ -193,19 +193,19 @@ func publishBlindedBlockV2(bs *Server, w http.ResponseWriter, r *http.Request) {
if err = validate.Struct(capellaBlock); err == nil {
consensusBlock, err := capellaBlock.ToGeneric()
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not decode request body into consensus block: " + err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
if err = bs.validateBroadcast(r, consensusBlock); err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
bs.proposeBlock(r.Context(), w, consensusBlock)
@@ -218,19 +218,19 @@ func publishBlindedBlockV2(bs *Server, w http.ResponseWriter, r *http.Request) {
if err = validate.Struct(bellatrixBlock); err == nil {
consensusBlock, err := bellatrixBlock.ToGeneric()
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not decode request body into consensus block: " + err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
if err = bs.validateBroadcast(r, consensusBlock); err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
bs.proposeBlock(r.Context(), w, consensusBlock)
@@ -242,19 +242,19 @@ func publishBlindedBlockV2(bs *Server, w http.ResponseWriter, r *http.Request) {
if err = validate.Struct(altairBlock); err == nil {
consensusBlock, err := altairBlock.ToGeneric()
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not decode request body into consensus block: " + err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
if err = bs.validateBroadcast(r, consensusBlock); err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
bs.proposeBlock(r.Context(), w, consensusBlock)
@@ -266,19 +266,19 @@ func publishBlindedBlockV2(bs *Server, w http.ResponseWriter, r *http.Request) {
if err = validate.Struct(phase0Block); err == nil {
consensusBlock, err := phase0Block.ToGeneric()
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not decode request body into consensus block: " + err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
if err = bs.validateBroadcast(r, consensusBlock); err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
bs.proposeBlock(r.Context(), w, consensusBlock)
@@ -286,11 +286,11 @@ func publishBlindedBlockV2(bs *Server, w http.ResponseWriter, r *http.Request) {
}
}
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Body does not represent a valid block type",
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
}
// PublishBlockV2 instructs the beacon node to broadcast a newly signed beacon block to the beacon network,
@@ -302,10 +302,10 @@ func publishBlindedBlockV2(bs *Server, w http.ResponseWriter, r *http.Request) {
// successfully broadcast but failed integration. The broadcast behaviour may be adjusted via the
// `broadcast_validation` query parameter.
func (bs *Server) PublishBlockV2(w http.ResponseWriter, r *http.Request) {
if ok := bs.checkSync(r.Context(), w); !ok {
if shared.IsSyncing(r.Context(), w, bs.SyncChecker, bs.HeadFetcher, bs.TimeFetcher, bs.OptimisticModeFetcher) {
return
}
isSSZ, err := network.SszRequested(r)
isSSZ, err := http2.SszRequested(r)
if isSSZ && err == nil {
publishBlockV2SSZ(bs, w, r)
} else {
@@ -317,11 +317,11 @@ func publishBlockV2SSZ(bs *Server, w http.ResponseWriter, r *http.Request) {
validate := validator.New()
body, err := io.ReadAll(r.Body)
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not read request body",
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
capellaBlock := &ethpbv2.SignedBeaconBlockCapella{}
@@ -329,11 +329,11 @@ func publishBlockV2SSZ(bs *Server, w http.ResponseWriter, r *http.Request) {
if err = validate.Struct(capellaBlock); err == nil {
v1block, err := migration.CapellaToV1Alpha1SignedBlock(capellaBlock)
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not decode request body into consensus block: " + err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
genericBlock := &eth.GenericSignedBeaconBlock{
@@ -342,11 +342,11 @@ func publishBlockV2SSZ(bs *Server, w http.ResponseWriter, r *http.Request) {
},
}
if err = bs.validateBroadcast(r, genericBlock); err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
bs.proposeBlock(r.Context(), w, genericBlock)
@@ -358,11 +358,11 @@ func publishBlockV2SSZ(bs *Server, w http.ResponseWriter, r *http.Request) {
if err = validate.Struct(bellatrixBlock); err == nil {
v1block, err := migration.BellatrixToV1Alpha1SignedBlock(bellatrixBlock)
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not decode request body into consensus block: " + err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
genericBlock := &eth.GenericSignedBeaconBlock{
@@ -371,11 +371,11 @@ func publishBlockV2SSZ(bs *Server, w http.ResponseWriter, r *http.Request) {
},
}
if err = bs.validateBroadcast(r, genericBlock); err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
bs.proposeBlock(r.Context(), w, genericBlock)
@@ -387,11 +387,11 @@ func publishBlockV2SSZ(bs *Server, w http.ResponseWriter, r *http.Request) {
if err = validate.Struct(altairBlock); err == nil {
v1block, err := migration.AltairToV1Alpha1SignedBlock(altairBlock)
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not decode request body into consensus block: " + err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
genericBlock := &eth.GenericSignedBeaconBlock{
@@ -400,11 +400,11 @@ func publishBlockV2SSZ(bs *Server, w http.ResponseWriter, r *http.Request) {
},
}
if err = bs.validateBroadcast(r, genericBlock); err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
bs.proposeBlock(r.Context(), w, genericBlock)
@@ -416,11 +416,11 @@ func publishBlockV2SSZ(bs *Server, w http.ResponseWriter, r *http.Request) {
if err = validate.Struct(phase0Block); err == nil {
v1block, err := migration.V1ToV1Alpha1SignedBlock(phase0Block)
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not decode request body into consensus block: " + err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
genericBlock := &eth.GenericSignedBeaconBlock{
@@ -429,33 +429,33 @@ func publishBlockV2SSZ(bs *Server, w http.ResponseWriter, r *http.Request) {
},
}
if err = bs.validateBroadcast(r, genericBlock); err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
bs.proposeBlock(r.Context(), w, genericBlock)
return
}
}
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Body does not represent a valid block type",
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
}
func publishBlockV2(bs *Server, w http.ResponseWriter, r *http.Request) {
validate := validator.New()
body, err := io.ReadAll(r.Body)
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not read request body",
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
var capellaBlock *SignedBeaconBlockCapella
@@ -463,19 +463,19 @@ func publishBlockV2(bs *Server, w http.ResponseWriter, r *http.Request) {
if err = validate.Struct(capellaBlock); err == nil {
consensusBlock, err := capellaBlock.ToGeneric()
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not decode request body into consensus block: " + err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
if err = bs.validateBroadcast(r, consensusBlock); err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
bs.proposeBlock(r.Context(), w, consensusBlock)
@@ -487,19 +487,19 @@ func publishBlockV2(bs *Server, w http.ResponseWriter, r *http.Request) {
if err = validate.Struct(bellatrixBlock); err == nil {
consensusBlock, err := bellatrixBlock.ToGeneric()
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not decode request body into consensus block: " + err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
if err = bs.validateBroadcast(r, consensusBlock); err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
bs.proposeBlock(r.Context(), w, consensusBlock)
@@ -511,19 +511,19 @@ func publishBlockV2(bs *Server, w http.ResponseWriter, r *http.Request) {
if err = validate.Struct(altairBlock); err == nil {
consensusBlock, err := altairBlock.ToGeneric()
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not decode request body into consensus block: " + err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
if err = bs.validateBroadcast(r, consensusBlock); err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
bs.proposeBlock(r.Context(), w, consensusBlock)
@@ -535,19 +535,19 @@ func publishBlockV2(bs *Server, w http.ResponseWriter, r *http.Request) {
if err = validate.Struct(phase0Block); err == nil {
consensusBlock, err := phase0Block.ToGeneric()
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not decode request body into consensus block: " + err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
if err = bs.validateBroadcast(r, consensusBlock); err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
bs.proposeBlock(r.Context(), w, consensusBlock)
@@ -555,21 +555,21 @@ func publishBlockV2(bs *Server, w http.ResponseWriter, r *http.Request) {
}
}
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Body does not represent a valid block type",
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
}
func (bs *Server) proposeBlock(ctx context.Context, w http.ResponseWriter, blk *eth.GenericSignedBeaconBlock) {
_, err := bs.V1Alpha1ValidatorServer.ProposeBeaconBlock(ctx, blk)
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
}
@@ -608,8 +608,13 @@ func (bs *Server) validateBroadcast(r *http.Request, blk *eth.GenericSignedBeaco
}
func (bs *Server) validateConsensus(ctx context.Context, blk interfaces.ReadOnlySignedBeaconBlock) error {
parentRoot := blk.Block().ParentRoot()
parentState, err := bs.Stater.State(ctx, parentRoot[:])
parentBlockRoot := blk.Block().ParentRoot()
parentBlock, err := bs.Blocker.Block(ctx, parentBlockRoot[:])
if err != nil {
return errors.Wrap(err, "could not get parent block")
}
parentStateRoot := parentBlock.Block().StateRoot()
parentState, err := bs.Stater.State(ctx, parentStateRoot[:])
if err != nil {
return errors.Wrap(err, "could not get parent state")
}
@@ -626,29 +631,3 @@ func (bs *Server) validateEquivocation(blk interfaces.ReadOnlyBeaconBlock) error
}
return nil
}
func (bs *Server) checkSync(ctx context.Context, w http.ResponseWriter) bool {
isSyncing, syncDetails, err := helpers.ValidateSyncHTTP(ctx, bs.SyncChecker, bs.HeadFetcher, bs.TimeFetcher, bs.OptimisticModeFetcher)
if err != nil {
errJson := &network.DefaultErrorJson{
Message: "Could not check if node is syncing: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
return false
}
if isSyncing {
msg := "Beacon node is currently syncing and not serving request on that endpoint"
details, err := json.Marshal(syncDetails)
if err == nil {
msg += " Details: " + string(details)
}
errJson := &network.DefaultErrorJson{
Message: msg,
Code: http.StatusServiceUnavailable,
}
network.WriteError(w, errJson)
return false
}
return true
}
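With the per-handler checkSync helper above replaced by shared.IsSyncing, PublishBlockV2 and PublishBlindedBlockV2 accept JSON or SSZ bodies and honour the broadcast_validation query parameter enforced by validateBroadcast, validateConsensus, and validateEquivocation. The sketch below shows one way a client might post a JSON block with that parameter; the route, host/port, header name, and payload are assumptions based on the standard Beacon API rather than code shown here (assumed imports: bytes, fmt, io, net/http).

// Hedged client-side sketch, not part of this change.
func publishSignedBlockJSON(blockJSON []byte) error {
	req, err := http.NewRequest(
		http.MethodPost,
		"http://localhost:3500/eth/v2/beacon/blocks?broadcast_validation=consensus_and_equivocation",
		bytes.NewReader(blockJSON), // pre-serialized SignedBeaconBlock JSON (placeholder)
	)
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("Eth-Consensus-Version", "capella") // assumed header name/value
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode >= 400 {
		body, _ := io.ReadAll(resp.Body)
		// On rejection, the handlers above write a DefaultErrorJson body.
		return fmt.Errorf("publish failed: %d %s", resp.StatusCode, body)
	}
	return nil
}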

View File

@@ -2,6 +2,7 @@ package beacon
import (
"bytes"
"context"
"encoding/json"
"net/http"
"net/http/httptest"
@@ -10,11 +11,20 @@ import (
"github.com/golang/mock/gomock"
testing2 "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/testutil"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
mock2 "github.com/prysmaticlabs/prysm/v4/testing/mock"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
"github.com/stretchr/testify/mock"
)
@@ -351,6 +361,63 @@ func TestPublishBlindedBlockV2SSZ(t *testing.T) {
})
}
func TestValidateConsensus(t *testing.T) {
ctx := context.Background()
parentState, privs := util.DeterministicGenesisState(t, params.MinimalSpecConfig().MinGenesisActiveValidatorCount)
parentBlock, err := util.GenerateFullBlock(parentState, privs, util.DefaultBlockGenConfig(), parentState.Slot())
require.NoError(t, err)
parentSbb, err := blocks.NewSignedBeaconBlock(parentBlock)
require.NoError(t, err)
st, err := transition.ExecuteStateTransition(ctx, parentState, parentSbb)
require.NoError(t, err)
block, err := util.GenerateFullBlock(st, privs, util.DefaultBlockGenConfig(), st.Slot())
require.NoError(t, err)
sbb, err := blocks.NewSignedBeaconBlock(block)
require.NoError(t, err)
parentRoot, err := parentSbb.Block().HashTreeRoot()
require.NoError(t, err)
server := &Server{
Blocker: &testutil.MockBlocker{RootBlockMap: map[[32]byte]interfaces.ReadOnlySignedBeaconBlock{parentRoot: parentSbb}},
Stater: &testutil.MockStater{StatesByRoot: map[[32]byte]state.BeaconState{bytesutil.ToBytes32(parentBlock.Block.StateRoot): parentState}},
}
require.NoError(t, server.validateConsensus(ctx, sbb))
}
func TestValidateEquivocation(t *testing.T) {
t.Run("ok", func(t *testing.T) {
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, st.SetSlot(10))
fc := doublylinkedtree.New()
require.NoError(t, fc.InsertNode(context.Background(), st, bytesutil.ToBytes32([]byte("root"))))
server := &Server{
ForkchoiceFetcher: &testing2.ChainService{ForkChoiceStore: fc},
}
blk, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
blk.SetSlot(st.Slot() + 1)
require.NoError(t, server.validateEquivocation(blk.Block()))
})
t.Run("block already exists", func(t *testing.T) {
st, err := util.NewBeaconState()
require.NoError(t, err)
require.NoError(t, st.SetSlot(10))
fc := doublylinkedtree.New()
require.NoError(t, fc.InsertNode(context.Background(), st, bytesutil.ToBytes32([]byte("root"))))
server := &Server{
ForkchoiceFetcher: &testing2.ChainService{ForkChoiceStore: fc},
}
blk, err := blocks.NewSignedBeaconBlock(util.NewBeaconBlock())
require.NoError(t, err)
blk.SetSlot(st.Slot())
assert.ErrorContains(t, "already exists", server.validateEquivocation(blk.Block()))
})
}
const (
phase0Block = `{
"message": {

View File

@@ -383,6 +383,9 @@ func TestListPoolVoluntaryExits(t *testing.T) {
func TestSubmitAttesterSlashing_Ok(t *testing.T) {
ctx := context.Background()
transition.SkipSlotCache.Disable()
defer transition.SkipSlotCache.Enable()
_, keys, err := util.DeterministicDepositsAndKeys(1)
require.NoError(t, err)
validator := &ethpbv1alpha1.Validator{
@@ -460,6 +463,9 @@ func TestSubmitAttesterSlashing_Ok(t *testing.T) {
func TestSubmitAttesterSlashing_AcrossFork(t *testing.T) {
ctx := context.Background()
transition.SkipSlotCache.Disable()
defer transition.SkipSlotCache.Enable()
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.AltairForkEpoch = 1
@@ -536,6 +542,10 @@ func TestSubmitAttesterSlashing_AcrossFork(t *testing.T) {
func TestSubmitAttesterSlashing_InvalidSlashing(t *testing.T) {
ctx := context.Background()
transition.SkipSlotCache.Disable()
defer transition.SkipSlotCache.Enable()
bs, err := util.NewBeaconState()
require.NoError(t, err)
@@ -577,6 +587,9 @@ func TestSubmitAttesterSlashing_InvalidSlashing(t *testing.T) {
func TestSubmitProposerSlashing_Ok(t *testing.T) {
ctx := context.Background()
transition.SkipSlotCache.Disable()
defer transition.SkipSlotCache.Enable()
_, keys, err := util.DeterministicDepositsAndKeys(1)
require.NoError(t, err)
validator := &ethpbv1alpha1.Validator{
@@ -647,6 +660,9 @@ func TestSubmitProposerSlashing_Ok(t *testing.T) {
func TestSubmitProposerSlashing_AcrossFork(t *testing.T) {
ctx := context.Background()
transition.SkipSlotCache.Disable()
defer transition.SkipSlotCache.Enable()
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.AltairForkEpoch = 1
@@ -715,6 +731,10 @@ func TestSubmitProposerSlashing_AcrossFork(t *testing.T) {
func TestSubmitProposerSlashing_InvalidSlashing(t *testing.T) {
ctx := context.Background()
transition.SkipSlotCache.Disable()
defer transition.SkipSlotCache.Enable()
bs, err := util.NewBeaconState()
require.NoError(t, err)
@@ -749,6 +769,9 @@ func TestSubmitProposerSlashing_InvalidSlashing(t *testing.T) {
func TestSubmitVoluntaryExit_Ok(t *testing.T) {
ctx := context.Background()
transition.SkipSlotCache.Disable()
defer transition.SkipSlotCache.Enable()
_, keys, err := util.DeterministicDepositsAndKeys(1)
require.NoError(t, err)
validator := &ethpbv1alpha1.Validator{
@@ -796,6 +819,9 @@ func TestSubmitVoluntaryExit_Ok(t *testing.T) {
func TestSubmitVoluntaryExit_AcrossFork(t *testing.T) {
ctx := context.Background()
transition.SkipSlotCache.Disable()
defer transition.SkipSlotCache.Enable()
params.SetupTestConfigCleanup(t)
config := params.BeaconConfig()
config.AltairForkEpoch = params.BeaconConfig().ShardCommitteePeriod + 1
@@ -837,6 +863,9 @@ func TestSubmitVoluntaryExit_AcrossFork(t *testing.T) {
func TestSubmitVoluntaryExit_InvalidValidatorIndex(t *testing.T) {
ctx := context.Background()
transition.SkipSlotCache.Disable()
defer transition.SkipSlotCache.Enable()
_, keys, err := util.DeterministicDepositsAndKeys(1)
require.NoError(t, err)
validator := &ethpbv1alpha1.Validator{
@@ -872,6 +901,9 @@ func TestSubmitVoluntaryExit_InvalidValidatorIndex(t *testing.T) {
func TestSubmitVoluntaryExit_InvalidExit(t *testing.T) {
ctx := context.Background()
transition.SkipSlotCache.Disable()
defer transition.SkipSlotCache.Enable()
_, keys, err := util.DeterministicDepositsAndKeys(1)
require.NoError(t, err)
validator := &ethpbv1alpha1.Validator{
@@ -906,6 +938,10 @@ func TestSubmitVoluntaryExit_InvalidExit(t *testing.T) {
func TestServer_SubmitAttestations_Ok(t *testing.T) {
ctx := context.Background()
transition.SkipSlotCache.Disable()
defer transition.SkipSlotCache.Enable()
params.SetupTestConfigCleanup(t)
c := params.BeaconConfig().Copy()
// Required for correct committee size calculation.
@@ -1014,6 +1050,9 @@ func TestServer_SubmitAttestations_Ok(t *testing.T) {
func TestServer_SubmitAttestations_ValidAttestationSubmitted(t *testing.T) {
ctx := grpc.NewContextWithServerTransportStream(context.Background(), &runtime.ServerTransportStream{})
transition.SkipSlotCache.Disable()
defer transition.SkipSlotCache.Enable()
params.SetupTestConfigCleanup(t)
c := params.BeaconConfig().Copy()
// Required for correct committee size calculation.
@@ -1116,6 +1155,9 @@ func TestServer_SubmitAttestations_ValidAttestationSubmitted(t *testing.T) {
func TestServer_SubmitAttestations_InvalidAttestationGRPCHeader(t *testing.T) {
ctx := grpc.NewContextWithServerTransportStream(context.Background(), &runtime.ServerTransportStream{})
transition.SkipSlotCache.Disable()
defer transition.SkipSlotCache.Enable()
params.SetupTestConfigCleanup(t)
c := params.BeaconConfig().Copy()
// Required for correct committee size calculation.
@@ -1219,6 +1261,10 @@ func TestListBLSToExecutionChanges(t *testing.T) {
func TestSubmitSignedBLSToExecutionChanges_Ok(t *testing.T) {
ctx := context.Background()
transition.SkipSlotCache.Disable()
defer transition.SkipSlotCache.Enable()
params.SetupTestConfigCleanup(t)
c := params.BeaconConfig().Copy()
// Required for correct committee size calculation.
@@ -1312,6 +1358,10 @@ func TestSubmitSignedBLSToExecutionChanges_Ok(t *testing.T) {
func TestSubmitSignedBLSToExecutionChanges_Bellatrix(t *testing.T) {
ctx := context.Background()
transition.SkipSlotCache.Disable()
defer transition.SkipSlotCache.Enable()
params.SetupTestConfigCleanup(t)
c := params.BeaconConfig().Copy()
// Required for correct committee size calculation.
@@ -1420,6 +1470,10 @@ func TestSubmitSignedBLSToExecutionChanges_Bellatrix(t *testing.T) {
func TestSubmitSignedBLSToExecutionChanges_Failures(t *testing.T) {
ctx := context.Background()
transition.SkipSlotCache.Disable()
defer transition.SkipSlotCache.Enable()
params.SetupTestConfigCleanup(t)
c := params.BeaconConfig().Copy()
// Required for correct committee size calculation.

View File

@@ -11,6 +11,7 @@ import (
statenative "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/consensus-types/validator"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
"github.com/prysmaticlabs/prysm/v4/proto/migration"
@@ -102,13 +103,13 @@ func (bs *Server) ListValidators(ctx context.Context, req *ethpb.StateValidators
return &ethpb.StateValidatorsResponse{Data: valContainers, ExecutionOptimistic: isOptimistic, Finalized: isFinalized}, nil
}
filterStatus := make(map[ethpb.ValidatorStatus]bool, len(req.Status))
const lastValidStatusValue = ethpb.ValidatorStatus(12)
filterStatus := make(map[validator.ValidatorStatus]bool, len(req.Status))
const lastValidStatusValue = 12
for _, ss := range req.Status {
if ss > lastValidStatusValue {
return nil, status.Errorf(codes.InvalidArgument, "Invalid status "+ss.String())
}
filterStatus[ss] = true
filterStatus[validator.ValidatorStatus(ss)] = true
}
epoch := slots.ToEpoch(st.Slot())
filteredVals := make([]*ethpb.ValidatorContainer, 0, len(valContainers))
@@ -243,8 +244,8 @@ func valContainersByRequestIds(state state.BeaconState, validatorIds [][]byte) (
if len(validatorIds) == 0 {
allValidators := state.Validators()
valContainers = make([]*ethpb.ValidatorContainer, len(allValidators))
for i, validator := range allValidators {
readOnlyVal, err := statenative.NewValidator(validator)
for i, val := range allValidators {
readOnlyVal, err := statenative.NewValidator(val)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not convert validator: %v", err)
}
@@ -255,8 +256,8 @@ func valContainersByRequestIds(state state.BeaconState, validatorIds [][]byte) (
valContainers[i] = &ethpb.ValidatorContainer{
Index: primitives.ValidatorIndex(i),
Balance: allBalances[i],
Status: subStatus,
Validator: migration.V1Alpha1ValidatorToV1(validator),
Status: ethpb.ValidatorStatus(subStatus),
Validator: migration.V1Alpha1ValidatorToV1(val),
}
}
} else {
@@ -278,7 +279,7 @@ func valContainersByRequestIds(state state.BeaconState, validatorIds [][]byte) (
}
valIndex = primitives.ValidatorIndex(index)
}
validator, err := state.ValidatorAtIndex(valIndex)
val, err := state.ValidatorAtIndex(valIndex)
if _, ok := err.(*statenative.ValidatorIndexOutOfRangeError); ok {
// Ignore well-formed yet unknown indexes.
continue
@@ -286,8 +287,8 @@ func valContainersByRequestIds(state state.BeaconState, validatorIds [][]byte) (
if err != nil {
return nil, errors.Wrap(err, "could not get validator")
}
v1Validator := migration.V1Alpha1ValidatorToV1(validator)
readOnlyVal, err := statenative.NewValidator(validator)
v1Validator := migration.V1Alpha1ValidatorToV1(val)
readOnlyVal, err := statenative.NewValidator(val)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not convert validator: %v", err)
}
@@ -298,7 +299,7 @@ func valContainersByRequestIds(state state.BeaconState, validatorIds [][]byte) (
valContainers = append(valContainers, &ethpb.ValidatorContainer{
Index: valIndex,
Balance: allBalances[valIndex],
Status: subStatus,
Status: ethpb.ValidatorStatus(subStatus),
Validator: v1Validator,
})
}

View File

@@ -15,6 +15,7 @@ import (
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/consensus-types/validator"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
"github.com/prysmaticlabs/prysm/v4/proto/migration"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
@@ -463,7 +464,7 @@ func TestListValidators_Status(t *testing.T) {
require.Equal(
t,
true,
status == ethpb.ValidatorStatus_ACTIVE,
status == validator.Active,
)
require.Equal(
t,
@@ -501,7 +502,7 @@ func TestListValidators_Status(t *testing.T) {
require.Equal(
t,
true,
status == ethpb.ValidatorStatus_ACTIVE_ONGOING,
status == validator.ActiveOngoing,
)
require.Equal(
t,
@@ -538,7 +539,7 @@ func TestListValidators_Status(t *testing.T) {
require.Equal(
t,
true,
status == ethpb.ValidatorStatus_EXITED,
status == validator.Exited,
)
require.Equal(
t,
@@ -574,7 +575,7 @@ func TestListValidators_Status(t *testing.T) {
require.Equal(
t,
true,
status == ethpb.ValidatorStatus_PENDING_INITIALIZED || status == ethpb.ValidatorStatus_EXITED_UNSLASHED,
status == validator.PendingInitialized || status == validator.ExitedUnslashed,
)
require.Equal(
t,
@@ -612,7 +613,7 @@ func TestListValidators_Status(t *testing.T) {
require.Equal(
t,
true,
status == ethpb.ValidatorStatus_PENDING || subStatus == ethpb.ValidatorStatus_EXITED_SLASHED,
status == validator.Pending || subStatus == validator.ExitedSlashed,
)
require.Equal(
t,

View File

@@ -16,7 +16,7 @@ go_library(
"//beacon-chain/rpc/lookup:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//network:go_default_library",
"//network/http:go_default_library",
"//proto/engine/v1:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
@@ -36,7 +36,7 @@ go_test(
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//network:go_default_library",
"//network/http:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",

View File

@@ -12,7 +12,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/network"
http2 "github.com/prysmaticlabs/prysm/v4/network/http"
enginev1 "github.com/prysmaticlabs/prysm/v4/proto/engine/v1"
"github.com/prysmaticlabs/prysm/v4/time/slots"
)
@@ -22,7 +22,7 @@ func (s *Server) ExpectedWithdrawals(w http.ResponseWriter, r *http.Request) {
// Retrieve beacon state
stateId := mux.Vars(r)["state_id"]
if stateId == "" {
network.WriteError(w, &network.DefaultErrorJson{
http2.WriteError(w, &http2.DefaultErrorJson{
Message: "state_id is required in URL params",
Code: http.StatusBadRequest,
})
@@ -30,7 +30,7 @@ func (s *Server) ExpectedWithdrawals(w http.ResponseWriter, r *http.Request) {
}
st, err := s.Stater.State(r.Context(), []byte(stateId))
if err != nil {
network.WriteError(w, handleWrapError(err, "could not retrieve state", http.StatusNotFound))
http2.WriteError(w, handleWrapError(err, "could not retrieve state", http.StatusNotFound))
return
}
queryParam := r.URL.Query().Get("proposal_slot")
@@ -38,7 +38,7 @@ func (s *Server) ExpectedWithdrawals(w http.ResponseWriter, r *http.Request) {
if queryParam != "" {
pSlot, err := strconv.ParseUint(queryParam, 10, 64)
if err != nil {
network.WriteError(w, handleWrapError(err, "invalid proposal slot value", http.StatusBadRequest))
http2.WriteError(w, handleWrapError(err, "invalid proposal slot value", http.StatusBadRequest))
return
}
proposalSlot = primitives.Slot(pSlot)
@@ -48,18 +48,18 @@ func (s *Server) ExpectedWithdrawals(w http.ResponseWriter, r *http.Request) {
// Perform sanity checks on proposal slot before computing state
capellaStart, err := slots.EpochStart(params.BeaconConfig().CapellaForkEpoch)
if err != nil {
network.WriteError(w, handleWrapError(err, "could not calculate Capella start slot", http.StatusInternalServerError))
http2.WriteError(w, handleWrapError(err, "could not calculate Capella start slot", http.StatusInternalServerError))
return
}
if proposalSlot < capellaStart {
network.WriteError(w, &network.DefaultErrorJson{
http2.WriteError(w, &http2.DefaultErrorJson{
Message: "expected withdrawals are not supported before Capella fork",
Code: http.StatusBadRequest,
})
return
}
if proposalSlot <= st.Slot() {
network.WriteError(w, &network.DefaultErrorJson{
http2.WriteError(w, &http2.DefaultErrorJson{
Message: fmt.Sprintf("proposal slot must be bigger than state slot. proposal slot: %d, state slot: %d", proposalSlot, st.Slot()),
Code: http.StatusBadRequest,
})
@@ -67,7 +67,7 @@ func (s *Server) ExpectedWithdrawals(w http.ResponseWriter, r *http.Request) {
}
lookAheadLimit := uint64(params.BeaconConfig().SlotsPerEpoch.Mul(uint64(params.BeaconConfig().MaxSeedLookahead)))
if st.Slot().Add(lookAheadLimit) <= proposalSlot {
network.WriteError(w, &network.DefaultErrorJson{
http2.WriteError(w, &http2.DefaultErrorJson{
Message: fmt.Sprintf("proposal slot cannot be >= %d slots ahead of state slot", lookAheadLimit),
Code: http.StatusBadRequest,
})
@@ -76,12 +76,12 @@ func (s *Server) ExpectedWithdrawals(w http.ResponseWriter, r *http.Request) {
// Get metadata for response
isOptimistic, err := s.OptimisticModeFetcher.IsOptimistic(r.Context())
if err != nil {
network.WriteError(w, handleWrapError(err, "could not get optimistic mode info", http.StatusInternalServerError))
http2.WriteError(w, handleWrapError(err, "could not get optimistic mode info", http.StatusInternalServerError))
return
}
root, err := helpers.BlockRootAtSlot(st, st.Slot()-1)
if err != nil {
network.WriteError(w, handleWrapError(err, "could not get block root", http.StatusInternalServerError))
http2.WriteError(w, handleWrapError(err, "could not get block root", http.StatusInternalServerError))
return
}
var blockRoot = [32]byte(root)
@@ -89,7 +89,7 @@ func (s *Server) ExpectedWithdrawals(w http.ResponseWriter, r *http.Request) {
// Advance state forward to proposal slot
st, err = transition.ProcessSlots(r.Context(), st, proposalSlot)
if err != nil {
network.WriteError(w, &network.DefaultErrorJson{
http2.WriteError(w, &http2.DefaultErrorJson{
Message: "could not process slots",
Code: http.StatusInternalServerError,
})
@@ -97,13 +97,13 @@ func (s *Server) ExpectedWithdrawals(w http.ResponseWriter, r *http.Request) {
}
withdrawals, err := st.ExpectedWithdrawals()
if err != nil {
network.WriteError(w, &network.DefaultErrorJson{
http2.WriteError(w, &http2.DefaultErrorJson{
Message: "could not get expected withdrawals",
Code: http.StatusInternalServerError,
})
return
}
network.WriteJson(w, &ExpectedWithdrawalsResponse{
http2.WriteJson(w, &ExpectedWithdrawalsResponse{
ExecutionOptimistic: isOptimistic,
Finalized: isFinalized,
Data: buildExpectedWithdrawalsData(withdrawals),
@@ -123,8 +123,8 @@ func buildExpectedWithdrawalsData(withdrawals []*enginev1.Withdrawal) []*Expecte
return data
}
func handleWrapError(err error, message string, code int) *network.DefaultErrorJson {
return &network.DefaultErrorJson{
func handleWrapError(err error, message string, code int) *http2.DefaultErrorJson {
return &http2.DefaultErrorJson{
Message: errors.Wrapf(err, message).Error(),
Code: code,
}
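
Editor's note: the proposal_slot checks above reject anything at or below the state slot, anything before the Capella fork start, and anything at or beyond MaxSeedLookahead epochs ahead of the state. A minimal sketch of that arithmetic (not part of the diff), assuming mainnet parameters of 32 slots per epoch and a MaxSeedLookahead of 4:

package main

import "fmt"

func main() {
	const slotsPerEpoch = 32   // assumed mainnet value
	const maxSeedLookahead = 4 // assumed mainnet value
	lookAheadLimit := uint64(slotsPerEpoch * maxSeedLookahead) // 128 slots

	stateSlot := uint64(100) // illustrative state slot
	// The handler accepts proposal slots strictly greater than the state slot
	// and strictly less than stateSlot + lookAheadLimit.
	fmt.Printf("accepted proposal_slot range: %d < slot < %d\n", stateSlot, stateSlot+lookAheadLimit)
}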

View File

@@ -16,7 +16,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
"github.com/prysmaticlabs/prysm/v4/network"
http2 "github.com/prysmaticlabs/prysm/v4/network/http"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
@@ -96,7 +96,7 @@ func TestExpectedWithdrawals_BadRequest(t *testing.T) {
s.ExpectedWithdrawals(writer, request)
assert.Equal(t, http.StatusBadRequest, writer.Code)
e := &network.DefaultErrorJson{}
e := &http2.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.StringContains(t, testCase.errorMessage, e.Message)

View File

@@ -13,14 +13,15 @@ go_library(
"//api/grpc:go_default_library",
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/rpc/eth/shared:go_default_library",
"//beacon-chain/rpc/lookup:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/sync:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/validator:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/eth/v1:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@org_golang_google_grpc//codes:go_default_library",
@@ -48,6 +49,7 @@ go_test(
"//config/params:go_default_library",
"//consensus-types/blocks:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/validator:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/engine/v1:go_default_library",
"//proto/eth/v1:go_default_library",

View File

@@ -10,6 +10,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/api/grpc"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/lookup"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync"
"github.com/prysmaticlabs/prysm/v4/config/params"
@@ -38,8 +39,8 @@ func ValidateSyncGRPC(
return status.Errorf(codes.Internal, "Could not check optimistic status: %v", err)
}
syncDetailsContainer := &SyncDetailsContainer{
Data: &SyncDetailsJson{
syncDetailsContainer := &shared.SyncDetailsContainer{
Data: &shared.SyncDetails{
HeadSlot: strconv.FormatUint(uint64(headSlot), 10),
SyncDistance: strconv.FormatUint(uint64(timeFetcher.CurrentSlot()-headSlot), 10),
IsSyncing: true,
@@ -58,35 +59,6 @@ func ValidateSyncGRPC(
return status.Error(codes.Unavailable, "Syncing to latest head, not ready to respond")
}
// ValidateSyncHTTP checks whether the node is currently syncing and returns sync information.
// It returns information whether the node is currently syncing along with sync details.
func ValidateSyncHTTP(
ctx context.Context,
syncChecker sync.Checker,
headFetcher blockchain.HeadFetcher,
timeFetcher blockchain.TimeFetcher,
optimisticModeFetcher blockchain.OptimisticModeFetcher,
) (bool, *SyncDetailsContainer, error) {
if !syncChecker.Syncing() {
return false, nil, nil
}
headSlot := headFetcher.HeadSlot()
isOptimistic, err := optimisticModeFetcher.IsOptimistic(ctx)
if err != nil {
return true, nil, errors.Wrap(err, "could not check optimistic status")
}
syncDetails := &SyncDetailsContainer{
Data: &SyncDetailsJson{
HeadSlot: strconv.FormatUint(uint64(headSlot), 10),
SyncDistance: strconv.FormatUint(uint64(timeFetcher.CurrentSlot()-headSlot), 10),
IsSyncing: true,
IsOptimistic: isOptimistic,
},
}
return true, syncDetails, nil
}
// IsOptimistic checks whether the beacon state's block is optimistic.
func IsOptimistic(
ctx context.Context,
@@ -216,17 +188,3 @@ func isStateRootOptimistic(
// No block matching requested state root, return true.
return true, nil
}
// SyncDetailsJson contains information about node sync status.
type SyncDetailsJson struct {
HeadSlot string `json:"head_slot"`
SyncDistance string `json:"sync_distance"`
IsSyncing bool `json:"is_syncing"`
IsOptimistic bool `json:"is_optimistic"`
ElOffline bool `json:"el_offline"`
}
// SyncDetailsContainer is a wrapper for Data.
type SyncDetailsContainer struct {
Data *SyncDetailsJson `json:"data"`
}

View File

@@ -5,66 +5,66 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
"github.com/prysmaticlabs/prysm/v4/consensus-types/validator"
)
// ValidatorStatus returns a validator's status at the given epoch.
func ValidatorStatus(validator state.ReadOnlyValidator, epoch primitives.Epoch) (ethpb.ValidatorStatus, error) {
valStatus, err := ValidatorSubStatus(validator, epoch)
func ValidatorStatus(val state.ReadOnlyValidator, epoch primitives.Epoch) (validator.ValidatorStatus, error) {
valStatus, err := ValidatorSubStatus(val, epoch)
if err != nil {
return 0, errors.Wrap(err, "could not get sub status")
return 0, errors.Wrap(err, "could not get validator sub status")
}
switch valStatus {
case ethpb.ValidatorStatus_PENDING_INITIALIZED, ethpb.ValidatorStatus_PENDING_QUEUED:
return ethpb.ValidatorStatus_PENDING, nil
case ethpb.ValidatorStatus_ACTIVE_ONGOING, ethpb.ValidatorStatus_ACTIVE_SLASHED, ethpb.ValidatorStatus_ACTIVE_EXITING:
return ethpb.ValidatorStatus_ACTIVE, nil
case ethpb.ValidatorStatus_EXITED_UNSLASHED, ethpb.ValidatorStatus_EXITED_SLASHED:
return ethpb.ValidatorStatus_EXITED, nil
case ethpb.ValidatorStatus_WITHDRAWAL_POSSIBLE, ethpb.ValidatorStatus_WITHDRAWAL_DONE:
return ethpb.ValidatorStatus_WITHDRAWAL, nil
case validator.PendingInitialized, validator.PendingQueued:
return validator.Pending, nil
case validator.ActiveOngoing, validator.ActiveSlashed, validator.ActiveExiting:
return validator.Active, nil
case validator.ExitedUnslashed, validator.ExitedSlashed:
return validator.Exited, nil
case validator.WithdrawalPossible, validator.WithdrawalDone:
return validator.Withdrawal, nil
}
return 0, errors.New("invalid validator state")
}
// ValidatorSubStatus returns a validator's sub-status at the given epoch.
func ValidatorSubStatus(validator state.ReadOnlyValidator, epoch primitives.Epoch) (ethpb.ValidatorStatus, error) {
func ValidatorSubStatus(val state.ReadOnlyValidator, epoch primitives.Epoch) (validator.ValidatorStatus, error) {
farFutureEpoch := params.BeaconConfig().FarFutureEpoch
// Pending.
if validator.ActivationEpoch() > epoch {
if validator.ActivationEligibilityEpoch() == farFutureEpoch {
return ethpb.ValidatorStatus_PENDING_INITIALIZED, nil
} else if validator.ActivationEligibilityEpoch() < farFutureEpoch {
return ethpb.ValidatorStatus_PENDING_QUEUED, nil
if val.ActivationEpoch() > epoch {
if val.ActivationEligibilityEpoch() == farFutureEpoch {
return validator.PendingInitialized, nil
} else if val.ActivationEligibilityEpoch() < farFutureEpoch {
return validator.PendingQueued, nil
}
}
// Active.
if validator.ActivationEpoch() <= epoch && epoch < validator.ExitEpoch() {
if validator.ExitEpoch() == farFutureEpoch {
return ethpb.ValidatorStatus_ACTIVE_ONGOING, nil
} else if validator.ExitEpoch() < farFutureEpoch {
if validator.Slashed() {
return ethpb.ValidatorStatus_ACTIVE_SLASHED, nil
if val.ActivationEpoch() <= epoch && epoch < val.ExitEpoch() {
if val.ExitEpoch() == farFutureEpoch {
return validator.ActiveOngoing, nil
} else if val.ExitEpoch() < farFutureEpoch {
if val.Slashed() {
return validator.ActiveSlashed, nil
}
return ethpb.ValidatorStatus_ACTIVE_EXITING, nil
return validator.ActiveExiting, nil
}
}
// Exited.
if validator.ExitEpoch() <= epoch && epoch < validator.WithdrawableEpoch() {
if validator.Slashed() {
return ethpb.ValidatorStatus_EXITED_SLASHED, nil
if val.ExitEpoch() <= epoch && epoch < val.WithdrawableEpoch() {
if val.Slashed() {
return validator.ExitedSlashed, nil
}
return ethpb.ValidatorStatus_EXITED_UNSLASHED, nil
return validator.ExitedUnslashed, nil
}
if validator.WithdrawableEpoch() <= epoch {
if validator.EffectiveBalance() != 0 {
return ethpb.ValidatorStatus_WITHDRAWAL_POSSIBLE, nil
if val.WithdrawableEpoch() <= epoch {
if val.EffectiveBalance() != 0 {
return validator.WithdrawalPossible, nil
} else {
return ethpb.ValidatorStatus_WITHDRAWAL_DONE, nil
return validator.WithdrawalDone, nil
}
}
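
Editor's note: a hedged sketch (not part of the diff) of how a caller might use the reworked status helpers together with the new consensus-types/validator constants. The helpers import path and the isActive wrapper are assumptions for illustration; ValidatorAtIndexReadOnly is used to obtain a state.ReadOnlyValidator from a beacon state.

package example

import (
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/helpers"
	"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
	"github.com/prysmaticlabs/prysm/v4/consensus-types/validator"
	"github.com/prysmaticlabs/prysm/v4/time/slots"
)

// isActive reports whether the validator at idx falls into the coarse Active
// bucket for the state's current epoch (hypothetical helper, editor-added).
func isActive(st state.BeaconState, idx primitives.ValidatorIndex) (bool, error) {
	val, err := st.ValidatorAtIndexReadOnly(idx)
	if err != nil {
		return false, err
	}
	status, err := helpers.ValidatorStatus(val, slots.ToEpoch(st.Slot()))
	if err != nil {
		return false, err
	}
	return status == validator.Active, nil
}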

View File

@@ -7,6 +7,7 @@ import (
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/consensus-types/validator"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
"github.com/prysmaticlabs/prysm/v4/proto/migration"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
@@ -23,7 +24,7 @@ func Test_ValidatorStatus(t *testing.T) {
tests := []struct {
name string
args args
want ethpb.ValidatorStatus
want validator.ValidatorStatus
wantErr bool
}{
{
@@ -35,7 +36,7 @@ func Test_ValidatorStatus(t *testing.T) {
},
epoch: primitives.Epoch(5),
},
want: ethpb.ValidatorStatus_PENDING,
want: validator.Pending,
},
{
name: "pending queued",
@@ -46,7 +47,7 @@ func Test_ValidatorStatus(t *testing.T) {
},
epoch: primitives.Epoch(5),
},
want: ethpb.ValidatorStatus_PENDING,
want: validator.Pending,
},
{
name: "active ongoing",
@@ -57,7 +58,7 @@ func Test_ValidatorStatus(t *testing.T) {
},
epoch: primitives.Epoch(5),
},
want: ethpb.ValidatorStatus_ACTIVE,
want: validator.Active,
},
{
name: "active slashed",
@@ -69,7 +70,7 @@ func Test_ValidatorStatus(t *testing.T) {
},
epoch: primitives.Epoch(5),
},
want: ethpb.ValidatorStatus_ACTIVE,
want: validator.Active,
},
{
name: "active exiting",
@@ -81,7 +82,7 @@ func Test_ValidatorStatus(t *testing.T) {
},
epoch: primitives.Epoch(5),
},
want: ethpb.ValidatorStatus_ACTIVE,
want: validator.Active,
},
{
name: "exited slashed",
@@ -94,7 +95,7 @@ func Test_ValidatorStatus(t *testing.T) {
},
epoch: primitives.Epoch(35),
},
want: ethpb.ValidatorStatus_EXITED,
want: validator.Exited,
},
{
name: "exited unslashed",
@@ -107,7 +108,7 @@ func Test_ValidatorStatus(t *testing.T) {
},
epoch: primitives.Epoch(35),
},
want: ethpb.ValidatorStatus_EXITED,
want: validator.Exited,
},
{
name: "withdrawal possible",
@@ -121,7 +122,7 @@ func Test_ValidatorStatus(t *testing.T) {
},
epoch: primitives.Epoch(45),
},
want: ethpb.ValidatorStatus_WITHDRAWAL,
want: validator.Withdrawal,
},
{
name: "withdrawal done",
@@ -135,7 +136,7 @@ func Test_ValidatorStatus(t *testing.T) {
},
epoch: primitives.Epoch(45),
},
want: ethpb.ValidatorStatus_WITHDRAWAL,
want: validator.Withdrawal,
},
}
for _, tt := range tests {
@@ -161,7 +162,7 @@ func Test_ValidatorSubStatus(t *testing.T) {
tests := []struct {
name string
args args
want ethpb.ValidatorStatus
want validator.ValidatorStatus
wantErr bool
}{
{
@@ -173,7 +174,7 @@ func Test_ValidatorSubStatus(t *testing.T) {
},
epoch: primitives.Epoch(5),
},
want: ethpb.ValidatorStatus_PENDING_INITIALIZED,
want: validator.PendingInitialized,
},
{
name: "pending queued",
@@ -184,7 +185,7 @@ func Test_ValidatorSubStatus(t *testing.T) {
},
epoch: primitives.Epoch(5),
},
want: ethpb.ValidatorStatus_PENDING_QUEUED,
want: validator.PendingQueued,
},
{
name: "active ongoing",
@@ -195,7 +196,7 @@ func Test_ValidatorSubStatus(t *testing.T) {
},
epoch: primitives.Epoch(5),
},
want: ethpb.ValidatorStatus_ACTIVE_ONGOING,
want: validator.ActiveOngoing,
},
{
name: "active slashed",
@@ -207,7 +208,7 @@ func Test_ValidatorSubStatus(t *testing.T) {
},
epoch: primitives.Epoch(5),
},
want: ethpb.ValidatorStatus_ACTIVE_SLASHED,
want: validator.ActiveSlashed,
},
{
name: "active exiting",
@@ -219,7 +220,7 @@ func Test_ValidatorSubStatus(t *testing.T) {
},
epoch: primitives.Epoch(5),
},
want: ethpb.ValidatorStatus_ACTIVE_EXITING,
want: validator.ActiveExiting,
},
{
name: "exited slashed",
@@ -232,7 +233,7 @@ func Test_ValidatorSubStatus(t *testing.T) {
},
epoch: primitives.Epoch(35),
},
want: ethpb.ValidatorStatus_EXITED_SLASHED,
want: validator.ExitedSlashed,
},
{
name: "exited unslashed",
@@ -245,7 +246,7 @@ func Test_ValidatorSubStatus(t *testing.T) {
},
epoch: primitives.Epoch(35),
},
want: ethpb.ValidatorStatus_EXITED_UNSLASHED,
want: validator.ExitedUnslashed,
},
{
name: "withdrawal possible",
@@ -259,7 +260,7 @@ func Test_ValidatorSubStatus(t *testing.T) {
},
epoch: primitives.Epoch(45),
},
want: ethpb.ValidatorStatus_WITHDRAWAL_POSSIBLE,
want: validator.WithdrawalPossible,
},
{
name: "withdrawal done",
@@ -273,7 +274,7 @@ func Test_ValidatorSubStatus(t *testing.T) {
},
epoch: primitives.Epoch(45),
},
want: ethpb.ValidatorStatus_WITHDRAWAL_DONE,
want: validator.WithdrawalDone,
},
}
for _, tt := range tests {

View File

@@ -3,8 +3,10 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"handlers.go",
"node.go",
"server.go",
"structs.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/node",
visibility = ["//beacon-chain:__subpackages__"],
@@ -17,6 +19,7 @@ go_library(
"//beacon-chain/p2p/peers:go_default_library",
"//beacon-chain/p2p/peers/peerdata:go_default_library",
"//beacon-chain/sync:go_default_library",
"//network/http:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/migration:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
@@ -35,6 +38,7 @@ go_library(
go_test(
name = "go_default_test",
srcs = [
"handlers_test.go",
"node_test.go",
"server_test.go",
],

View File

@@ -0,0 +1,34 @@
package node
import (
"net/http"
"strconv"
http2 "github.com/prysmaticlabs/prysm/v4/network/http"
"go.opencensus.io/trace"
)
// GetSyncStatus requests the beacon node to describe if it's currently syncing or not, and
// if it is, what block it is up to.
func (s *Server) GetSyncStatus(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "node.GetSyncStatus")
defer span.End()
isOptimistic, err := s.OptimisticModeFetcher.IsOptimistic(ctx)
if err != nil {
http2.HandleError(w, "Could not check optimistic status: "+err.Error(), http.StatusInternalServerError)
return
}
headSlot := s.HeadFetcher.HeadSlot()
response := &SyncStatusResponse{
Data: &SyncStatusResponseData{
HeadSlot: strconv.FormatUint(uint64(headSlot), 10),
SyncDistance: strconv.FormatUint(uint64(s.GenesisTimeFetcher.CurrentSlot()-headSlot), 10),
IsSyncing: s.SyncChecker.Syncing(),
IsOptimistic: isOptimistic,
ElOffline: !s.ExecutionChainInfoFetcher.ExecutionClientConnected(),
},
}
http2.WriteJson(w, response)
}

View File

@@ -0,0 +1,52 @@
package node
import (
"bytes"
"encoding/json"
"net/http"
"net/http/httptest"
"testing"
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/testutil"
syncmock "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
)
func TestSyncStatus(t *testing.T) {
currentSlot := new(primitives.Slot)
*currentSlot = 110
state, err := util.NewBeaconState()
require.NoError(t, err)
err = state.SetSlot(100)
require.NoError(t, err)
chainService := &mock.ChainService{Slot: currentSlot, State: state, Optimistic: true}
syncChecker := &syncmock.Sync{}
syncChecker.IsSyncing = true
s := &Server{
HeadFetcher: chainService,
GenesisTimeFetcher: chainService,
OptimisticModeFetcher: chainService,
SyncChecker: syncChecker,
ExecutionChainInfoFetcher: &testutil.MockExecutionChainInfoFetcher{},
}
request := httptest.NewRequest(http.MethodGet, "http://example.com", nil)
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.GetSyncStatus(writer, request)
assert.Equal(t, http.StatusOK, writer.Code)
resp := &SyncStatusResponse{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), resp))
require.NotNil(t, resp)
assert.Equal(t, "100", resp.Data.HeadSlot)
assert.Equal(t, "10", resp.Data.SyncDistance)
assert.Equal(t, true, resp.Data.IsSyncing)
assert.Equal(t, true, resp.Data.IsOptimistic)
assert.Equal(t, false, resp.Data.ElOffline)
}

View File

@@ -259,29 +259,6 @@ func (_ *Server) GetVersion(ctx context.Context, _ *emptypb.Empty) (*ethpb.Versi
}, nil
}
// GetSyncStatus requests the beacon node to describe if it's currently syncing or not, and
// if it is, what block it is up to.
func (ns *Server) GetSyncStatus(ctx context.Context, _ *emptypb.Empty) (*ethpb.SyncingResponse, error) {
ctx, span := trace.StartSpan(ctx, "node.GetSyncStatus")
defer span.End()
isOptimistic, err := ns.OptimisticModeFetcher.IsOptimistic(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not check optimistic status: %v", err)
}
headSlot := ns.HeadFetcher.HeadSlot()
return &ethpb.SyncingResponse{
Data: &ethpb.SyncInfo{
HeadSlot: headSlot,
SyncDistance: ns.GenesisTimeFetcher.CurrentSlot() - headSlot,
IsSyncing: ns.SyncChecker.Syncing(),
IsOptimistic: isOptimistic,
ElOffline: !ns.ExecutionChainInfoFetcher.ExecutionClientConnected(),
},
}, nil
}
// GetHealth returns node health status in http status codes. Useful for load balancers.
// Response Usage:
//

View File

@@ -18,20 +18,16 @@ import (
ma "github.com/multiformats/go-multiaddr"
"github.com/prysmaticlabs/go-bitfield"
grpcutil "github.com/prysmaticlabs/prysm/v4/api/grpc"
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers"
mockp2p "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/testutil"
syncmock "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/consensus-types/wrapper"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
pb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
"github.com/prysmaticlabs/prysm/v4/testing/util"
"google.golang.org/grpc"
"google.golang.org/protobuf/types/known/emptypb"
)
@@ -161,33 +157,6 @@ func TestGetIdentity(t *testing.T) {
})
}
func TestSyncStatus(t *testing.T) {
currentSlot := new(primitives.Slot)
*currentSlot = 110
state, err := util.NewBeaconState()
require.NoError(t, err)
err = state.SetSlot(100)
require.NoError(t, err)
chainService := &mock.ChainService{Slot: currentSlot, State: state, Optimistic: true}
syncChecker := &syncmock.Sync{}
syncChecker.IsSyncing = true
s := &Server{
HeadFetcher: chainService,
GenesisTimeFetcher: chainService,
OptimisticModeFetcher: chainService,
SyncChecker: syncChecker,
ExecutionChainInfoFetcher: &testutil.MockExecutionChainInfoFetcher{},
}
resp, err := s.GetSyncStatus(context.Background(), &emptypb.Empty{})
require.NoError(t, err)
assert.Equal(t, primitives.Slot(100), resp.Data.HeadSlot)
assert.Equal(t, primitives.Slot(10), resp.Data.SyncDistance)
assert.Equal(t, true, resp.Data.IsSyncing)
assert.Equal(t, true, resp.Data.IsOptimistic)
assert.Equal(t, false, resp.Data.ElOffline)
}
func TestGetPeer(t *testing.T) {
const rawId = "16Uiu2HAkvyYtoQXZNTsthjgLHjEnv7kvwzEmjvsJjWXpbhtqpSUN"
ctx := context.Background()

View File

@@ -0,0 +1,13 @@
package node
type SyncStatusResponse struct {
Data *SyncStatusResponseData `json:"data"`
}
type SyncStatusResponseData struct {
HeadSlot string `json:"head_slot"`
SyncDistance string `json:"sync_distance"`
IsSyncing bool `json:"is_syncing"`
IsOptimistic bool `json:"is_optimistic"`
ElOffline bool `json:"el_offline"`
}
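
Editor's note: a minimal client-side sketch (not part of the diff) that queries the new HTTP handler and decodes the response shape defined above. The URL assumes the handler is registered at the standard Beacon API path /eth/v1/node/syncing on the default gateway port; route registration is not shown in this diff.

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"net/http"
)

// syncStatusResponse mirrors the SyncStatusResponse/SyncStatusResponseData structs above.
type syncStatusResponse struct {
	Data struct {
		HeadSlot     string `json:"head_slot"`
		SyncDistance string `json:"sync_distance"`
		IsSyncing    bool   `json:"is_syncing"`
		IsOptimistic bool   `json:"is_optimistic"`
		ElOffline    bool   `json:"el_offline"`
	} `json:"data"`
}

func main() {
	// Assumed endpoint and port; adjust to your node's HTTP gateway address.
	resp, err := http.Get("http://localhost:3500/eth/v1/node/syncing")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	var status syncStatusResponse
	if err := json.NewDecoder(resp.Body).Decode(&status); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("head_slot=%s sync_distance=%s syncing=%t optimistic=%t el_offline=%t\n",
		status.Data.HeadSlot, status.Data.SyncDistance,
		status.Data.IsSyncing, status.Data.IsOptimistic, status.Data.ElOffline)
}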

View File

@@ -23,7 +23,7 @@ go_library(
"//consensus-types/blocks:go_default_library",
"//consensus-types/interfaces:go_default_library",
"//consensus-types/primitives:go_default_library",
"//network:go_default_library",
"//network/http:go_default_library",
"//runtime/version:go_default_library",
"//time/slots:go_default_library",
"@com_github_pkg_errors//:go_default_library",
@@ -51,7 +51,7 @@ go_test(
"//crypto/bls:go_default_library",
"//crypto/bls/blst:go_default_library",
"//encoding/bytesutil:go_default_library",
"//network:go_default_library",
"//network/http:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",

View File

@@ -19,7 +19,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/consensus-types/blocks"
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/network"
http2 "github.com/prysmaticlabs/prysm/v4/network/http"
"github.com/prysmaticlabs/prysm/v4/runtime/version"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"github.com/wealdtech/go-bytesutil"
@@ -32,15 +32,15 @@ func (s *Server) BlockRewards(w http.ResponseWriter, r *http.Request) {
blk, err := s.Blocker.Block(r.Context(), []byte(blockId))
if errJson := handleGetBlockError(blk, err); errJson != nil {
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
if blk.Version() == version.Phase0 {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Block rewards are not supported for Phase 0 blocks",
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
@@ -49,114 +49,114 @@ func (s *Server) BlockRewards(w http.ResponseWriter, r *http.Request) {
// To do this, we replay the state up to the block's slot, but before processing the block.
st, err := s.ReplayerBuilder.ReplayerForSlot(blk.Block().Slot()-1).ReplayToSlot(r.Context(), blk.Block().Slot())
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not get state: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
proposerIndex := blk.Block().ProposerIndex()
initBalance, err := st.BalanceAtIndex(proposerIndex)
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not get proposer's balance: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
st, err = altair.ProcessAttestationsNoVerifySignature(r.Context(), st, blk)
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not get attestation rewards" + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
attBalance, err := st.BalanceAtIndex(proposerIndex)
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not get proposer's balance: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
st, err = coreblocks.ProcessAttesterSlashings(r.Context(), st, blk.Block().Body().AttesterSlashings(), validators.SlashValidator)
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not get attester slashing rewards: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
attSlashingsBalance, err := st.BalanceAtIndex(proposerIndex)
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not get proposer's balance: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
st, err = coreblocks.ProcessProposerSlashings(r.Context(), st, blk.Block().Body().ProposerSlashings(), validators.SlashValidator)
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not get proposer slashing rewards" + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
proposerSlashingsBalance, err := st.BalanceAtIndex(proposerIndex)
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not get proposer's balance: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
sa, err := blk.Block().Body().SyncAggregate()
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not get sync aggregate: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
var syncCommitteeReward uint64
_, syncCommitteeReward, err = altair.ProcessSyncAggregate(r.Context(), st, sa)
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not get sync aggregate rewards: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
optimistic, err := s.OptimisticModeFetcher.IsOptimistic(r.Context())
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not get optimistic mode info: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
blkRoot, err := blk.Block().HashTreeRoot()
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not get block root: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
@@ -172,7 +172,7 @@ func (s *Server) BlockRewards(w http.ResponseWriter, r *http.Request) {
ExecutionOptimistic: optimistic,
Finalized: s.FinalizationFetcher.IsFinalized(r.Context(), blkRoot),
}
network.WriteJson(w, response)
http2.WriteJson(w, response)
}
// AttestationRewards retrieves attestation reward info for validators specified by array of public keys or validator index.
@@ -198,20 +198,20 @@ func (s *Server) AttestationRewards(w http.ResponseWriter, r *http.Request) {
optimistic, err := s.OptimisticModeFetcher.IsOptimistic(r.Context())
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not get optimistic mode info: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
blkRoot, err := st.LatestBlockHeader().HashTreeRoot()
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not get block root: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
@@ -223,7 +223,7 @@ func (s *Server) AttestationRewards(w http.ResponseWriter, r *http.Request) {
ExecutionOptimistic: optimistic,
Finalized: s.FinalizationFetcher.IsFinalized(r.Context(), blkRoot),
}
network.WriteJson(w, resp)
http2.WriteJson(w, resp)
}
// SyncCommitteeRewards retrieves rewards info for sync committee members specified by array of public keys or validator index.
@@ -234,33 +234,33 @@ func (s *Server) SyncCommitteeRewards(w http.ResponseWriter, r *http.Request) {
blk, err := s.Blocker.Block(r.Context(), []byte(blockId))
if errJson := handleGetBlockError(blk, err); errJson != nil {
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
if blk.Version() == version.Phase0 {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Sync committee rewards are not supported for Phase 0",
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
st, err := s.ReplayerBuilder.ReplayerForSlot(blk.Block().Slot()-1).ReplayToSlot(r.Context(), blk.Block().Slot())
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not get state: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
sa, err := blk.Block().Body().SyncAggregate()
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not get sync aggregate: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
@@ -272,22 +272,22 @@ func (s *Server) SyncCommitteeRewards(w http.ResponseWriter, r *http.Request) {
for i, valIdx := range valIndices {
preProcessBals[i], err = st.BalanceAtIndex(valIdx)
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not get validator's balance: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
}
_, proposerReward, err := altair.ProcessSyncAggregate(r.Context(), st, sa)
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not get sync aggregate rewards: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
@@ -296,11 +296,11 @@ func (s *Server) SyncCommitteeRewards(w http.ResponseWriter, r *http.Request) {
for i, valIdx := range valIndices {
bal, err := st.BalanceAtIndex(valIdx)
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not get validator's balance: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
rewards[i] = int(bal - preProcessBals[i]) // lint:ignore uintcast
@@ -311,20 +311,20 @@ func (s *Server) SyncCommitteeRewards(w http.ResponseWriter, r *http.Request) {
optimistic, err := s.OptimisticModeFetcher.IsOptimistic(r.Context())
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not get optimistic mode info: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
blkRoot, err := blk.Block().HashTreeRoot()
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not get block root: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
@@ -340,53 +340,53 @@ func (s *Server) SyncCommitteeRewards(w http.ResponseWriter, r *http.Request) {
ExecutionOptimistic: optimistic,
Finalized: s.FinalizationFetcher.IsFinalized(r.Context(), blkRoot),
}
network.WriteJson(w, response)
http2.WriteJson(w, response)
}
func (s *Server) attRewardsState(w http.ResponseWriter, r *http.Request) (state.BeaconState, bool) {
segments := strings.Split(r.URL.Path, "/")
requestedEpoch, err := strconv.ParseUint(segments[len(segments)-1], 10, 64)
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not decode epoch: " + err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return nil, false
}
if primitives.Epoch(requestedEpoch) < params.BeaconConfig().AltairForkEpoch {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Attestation rewards are not supported for Phase 0",
Code: http.StatusNotFound,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return nil, false
}
currentEpoch := uint64(slots.ToEpoch(s.TimeFetcher.CurrentSlot()))
if requestedEpoch+1 >= currentEpoch {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Code: http.StatusNotFound,
Message: "Attestation rewards are available after two epoch transitions to ensure all attestations have a chance of inclusion",
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return nil, false
}
nextEpochEnd, err := slots.EpochEnd(primitives.Epoch(requestedEpoch + 1))
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not get next epoch's ending slot: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return nil, false
}
st, err := s.Stater.StateBySlot(r.Context(), nextEpochEnd)
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not get state for epoch's starting slot: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return nil, false
}
return st, true
@@ -399,20 +399,20 @@ func attRewardsBalancesAndVals(
) (*precompute.Balance, []*precompute.Validator, []primitives.ValidatorIndex, bool) {
allVals, bal, err := altair.InitializePrecomputeValidators(r.Context(), st)
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not initialize precompute validators: " + err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return nil, nil, nil, false
}
allVals, bal, err = altair.ProcessEpochParticipation(r.Context(), st, bal, allVals)
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not process epoch participation: " + err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return nil, nil, nil, false
}
valIndices, ok := requestedValIndices(w, r, st, allVals)
@@ -463,11 +463,11 @@ func idealAttRewards(
}
deltas, err := altair.AttestationsDelta(st, bal, idealVals)
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not get attestations delta: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return nil, false
}
for i, d := range deltas {
@@ -499,11 +499,11 @@ func totalAttRewards(
}
deltas, err := altair.AttestationsDelta(st, bal, vals)
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not get attestations delta: " + err.Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return nil, false
}
for i, d := range deltas {
@@ -529,11 +529,11 @@ func syncRewardsVals(
) ([]*precompute.Validator, []primitives.ValidatorIndex, bool) {
allVals, _, err := altair.InitializePrecomputeValidators(r.Context(), st)
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not initialize precompute validators: " + err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return nil, nil, false
}
valIndices, ok := requestedValIndices(w, r, st, allVals)
@@ -543,22 +543,22 @@ func syncRewardsVals(
sc, err := st.CurrentSyncCommittee()
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not get current sync committee: " + err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return nil, nil, false
}
allScIndices := make([]primitives.ValidatorIndex, len(sc.Pubkeys))
for i, pk := range sc.Pubkeys {
valIdx, ok := st.ValidatorIndexByPubkey(bytesutil.ToBytes48(pk))
if !ok {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: fmt.Sprintf("No validator index found for pubkey %#x", pk),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return nil, nil, false
}
allScIndices[i] = valIdx
@@ -583,11 +583,11 @@ func requestedValIndices(w http.ResponseWriter, r *http.Request, st state.Beacon
var rawValIds []string
if r.Body != http.NoBody {
if err := json.NewDecoder(r.Body).Decode(&rawValIds); err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: "Could not decode validators: " + err.Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return nil, false
}
}
@@ -597,30 +597,30 @@ func requestedValIndices(w http.ResponseWriter, r *http.Request, st state.Beacon
if err != nil {
pubkey, err := bytesutil.FromHexString(v)
if err != nil || len(pubkey) != fieldparams.BLSPubkeyLength {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: fmt.Sprintf("%s is not a validator index or pubkey", v),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return nil, false
}
var ok bool
valIndices[i], ok = st.ValidatorIndexByPubkey(bytesutil.ToBytes48(pubkey))
if !ok {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: fmt.Sprintf("No validator index found for pubkey %#x", pubkey),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return nil, false
}
} else {
if index >= uint64(st.NumValidators()) {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: fmt.Sprintf("Validator index %d is too large. Maximum allowed index is %d", index, st.NumValidators()-1),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return nil, false
}
valIndices[i] = primitives.ValidatorIndex(index)
@@ -636,21 +636,21 @@ func requestedValIndices(w http.ResponseWriter, r *http.Request, st state.Beacon
return valIndices, true
}
func handleGetBlockError(blk interfaces.ReadOnlySignedBeaconBlock, err error) *network.DefaultErrorJson {
func handleGetBlockError(blk interfaces.ReadOnlySignedBeaconBlock, err error) *http2.DefaultErrorJson {
if errors.Is(err, lookup.BlockIdParseError{}) {
return &network.DefaultErrorJson{
return &http2.DefaultErrorJson{
Message: "Invalid block ID: " + err.Error(),
Code: http.StatusBadRequest,
}
}
if err != nil {
return &network.DefaultErrorJson{
return &http2.DefaultErrorJson{
Message: "Could not get block from block ID: " + err.Error(),
Code: http.StatusInternalServerError,
}
}
if err := blocks.BeaconBlockIsNil(blk); err != nil {
return &network.DefaultErrorJson{
return &http2.DefaultErrorJson{
Message: "Could not find requested block: " + err.Error(),
Code: http.StatusNotFound,
}
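
Editor's note: a hedged sketch (not part of the diff) of calling the SyncCommitteeRewards handler. requestedValIndices above accepts a JSON array of validator indices or hex pubkeys in the request body; the URL assumes the standard Beacon API route /eth/v1/beacon/rewards/sync_committee/{block_id} on the default gateway port, which this diff does not show.

package main

import (
	"bytes"
	"fmt"
	"io"
	"log"
	"net/http"
)

func main() {
	// Request rewards for validator indices 0 and 5 at the head block
	// (indices and URL are illustrative assumptions).
	body := bytes.NewBufferString(`["0", "5"]`)
	resp, err := http.Post(
		"http://localhost:3500/eth/v1/beacon/rewards/sync_committee/head",
		"application/json",
		body,
	)
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	// On failure the handler writes a DefaultErrorJson body, so print whatever came back.
	out, err := io.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(resp.Status, string(out))
}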

View File

@@ -27,7 +27,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
"github.com/prysmaticlabs/prysm/v4/crypto/bls/blst"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
"github.com/prysmaticlabs/prysm/v4/network"
http2 "github.com/prysmaticlabs/prysm/v4/network/http"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
@@ -197,7 +197,7 @@ func TestBlockRewards(t *testing.T) {
s.BlockRewards(writer, request)
assert.Equal(t, http.StatusBadRequest, writer.Code)
e := &network.DefaultErrorJson{}
e := &http2.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.Equal(t, "Block rewards are not supported for Phase 0 blocks", e.Message)
@@ -399,7 +399,7 @@ func TestAttestationRewards(t *testing.T) {
s.AttestationRewards(writer, request)
assert.Equal(t, http.StatusBadRequest, writer.Code)
e := &network.DefaultErrorJson{}
e := &http2.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.Equal(t, "foo is not a validator index or pubkey", e.Message)
@@ -420,7 +420,7 @@ func TestAttestationRewards(t *testing.T) {
s.AttestationRewards(writer, request)
assert.Equal(t, http.StatusBadRequest, writer.Code)
e := &network.DefaultErrorJson{}
e := &http2.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.Equal(t, "No validator index found for pubkey "+pubkey, e.Message)
@@ -438,7 +438,7 @@ func TestAttestationRewards(t *testing.T) {
s.AttestationRewards(writer, request)
assert.Equal(t, http.StatusBadRequest, writer.Code)
e := &network.DefaultErrorJson{}
e := &http2.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.Equal(t, "Validator index 999 is too large. Maximum allowed index is 63", e.Message)
@@ -451,7 +451,7 @@ func TestAttestationRewards(t *testing.T) {
s.AttestationRewards(writer, request)
assert.Equal(t, http.StatusNotFound, writer.Code)
e := &network.DefaultErrorJson{}
e := &http2.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusNotFound, e.Code)
assert.Equal(t, "Attestation rewards are not supported for Phase 0", e.Message)
@@ -464,7 +464,7 @@ func TestAttestationRewards(t *testing.T) {
s.AttestationRewards(writer, request)
assert.Equal(t, http.StatusBadRequest, writer.Code)
e := &network.DefaultErrorJson{}
e := &http2.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.Equal(t, true, strings.Contains(e.Message, "Could not decode epoch"))
@@ -477,7 +477,7 @@ func TestAttestationRewards(t *testing.T) {
s.AttestationRewards(writer, request)
assert.Equal(t, http.StatusNotFound, writer.Code)
e := &network.DefaultErrorJson{}
e := &http2.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusNotFound, e.Code)
assert.Equal(t, "Attestation rewards are available after two epoch transitions to ensure all attestations have a chance of inclusion", e.Message)
@@ -702,7 +702,7 @@ func TestSyncCommiteeRewards(t *testing.T) {
s.SyncCommitteeRewards(writer, request)
assert.Equal(t, http.StatusBadRequest, writer.Code)
e := &network.DefaultErrorJson{}
e := &http2.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.Equal(t, "foo is not a validator index or pubkey", e.Message)
@@ -729,7 +729,7 @@ func TestSyncCommiteeRewards(t *testing.T) {
s.SyncCommitteeRewards(writer, request)
assert.Equal(t, http.StatusBadRequest, writer.Code)
e := &network.DefaultErrorJson{}
e := &http2.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.Equal(t, "No validator index found for pubkey "+pubkey, e.Message)
@@ -753,7 +753,7 @@ func TestSyncCommiteeRewards(t *testing.T) {
s.SyncCommitteeRewards(writer, request)
assert.Equal(t, http.StatusBadRequest, writer.Code)
e := &network.DefaultErrorJson{}
e := &http2.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.Equal(t, "Validator index 9999 is too large. Maximum allowed index is 1023", e.Message)
@@ -772,7 +772,7 @@ func TestSyncCommiteeRewards(t *testing.T) {
s.SyncCommitteeRewards(writer, request)
assert.Equal(t, http.StatusBadRequest, writer.Code)
e := &network.DefaultErrorJson{}
e := &http2.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, e.Code)
assert.Equal(t, "Sync committee rewards are not supported for Phase 0", e.Message)

View File

@@ -0,0 +1,32 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"errors.go",
"request.go",
"structs.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared",
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/sync:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/validator:go_default_library",
"//encoding/bytesutil:go_default_library",
"//network/http:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["errors_test.go"],
embed = [":go_default_library"],
deps = [
"//testing/assert:go_default_library",
"@com_github_pkg_errors//:go_default_library",
],
)

View File

@@ -0,0 +1,28 @@
package shared
import (
"fmt"
"strings"
)
// DecodeError represents an error resulting from trying to decode an HTTP request.
// It tracks the full field name for which decoding failed.
type DecodeError struct {
path []string
err error
}
// NewDecodeError wraps an error (either the initial decoding error or another DecodeError).
// The current field that failed decoding must be passed in.
func NewDecodeError(err error, field string) *DecodeError {
de, ok := err.(*DecodeError)
if ok {
return &DecodeError{path: append([]string{field}, de.path...), err: de.err}
}
return &DecodeError{path: []string{field}, err: err}
}
// Error returns the formatted error message which contains the full field name and the actual decoding error.
func (e *DecodeError) Error() string {
return fmt.Sprintf("could not decode %s: %s", strings.Join(e.path, "."), e.err.Error())
}

View File

@@ -0,0 +1,16 @@
package shared
import (
"testing"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
)
func TestDecodeError(t *testing.T) {
e := errors.New("not a number")
de := NewDecodeError(e, "Z")
de = NewDecodeError(de, "Y")
de = NewDecodeError(de, "X")
assert.Equal(t, "could not decode X.Y.Z: not a number", de.Error())
}

View File

@@ -0,0 +1,124 @@
package shared
import (
"context"
"encoding/json"
"net/http"
"strconv"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
http2 "github.com/prysmaticlabs/prysm/v4/network/http"
)
func ValidateHex(w http.ResponseWriter, name string, s string) bool {
if s == "" {
errJson := &http2.DefaultErrorJson{
Message: name + " is required",
Code: http.StatusBadRequest,
}
http2.WriteError(w, errJson)
return false
}
if !bytesutil.IsHex([]byte(s)) {
errJson := &http2.DefaultErrorJson{
Message: name + " is invalid",
Code: http.StatusBadRequest,
}
http2.WriteError(w, errJson)
return false
}
return true
}
func ValidateUint(w http.ResponseWriter, name string, s string) (uint64, bool) {
if s == "" {
errJson := &http2.DefaultErrorJson{
Message: name + " is required",
Code: http.StatusBadRequest,
}
http2.WriteError(w, errJson)
return 0, false
}
v, err := strconv.ParseUint(s, 10, 64)
if err != nil {
errJson := &http2.DefaultErrorJson{
Message: name + " is invalid: " + err.Error(),
Code: http.StatusBadRequest,
}
http2.WriteError(w, errJson)
return 0, false
}
return v, true
}
// IsSyncing checks whether the beacon node is currently syncing and writes out the sync status.
func IsSyncing(
ctx context.Context,
w http.ResponseWriter,
syncChecker sync.Checker,
headFetcher blockchain.HeadFetcher,
timeFetcher blockchain.TimeFetcher,
optimisticModeFetcher blockchain.OptimisticModeFetcher,
) bool {
if !syncChecker.Syncing() {
return false
}
headSlot := headFetcher.HeadSlot()
isOptimistic, err := optimisticModeFetcher.IsOptimistic(ctx)
if err != nil {
errJson := &http2.DefaultErrorJson{
Message: "Could not check optimistic status: " + err.Error(),
Code: http.StatusInternalServerError,
}
http2.WriteError(w, errJson)
return true
}
syncDetails := &SyncDetailsContainer{
Data: &SyncDetails{
HeadSlot: strconv.FormatUint(uint64(headSlot), 10),
SyncDistance: strconv.FormatUint(uint64(timeFetcher.CurrentSlot()-headSlot), 10),
IsSyncing: true,
IsOptimistic: isOptimistic,
},
}
msg := "Beacon node is currently syncing and not serving request on that endpoint"
details, err := json.Marshal(syncDetails)
if err == nil {
msg += " Details: " + string(details)
}
errJson := &http2.DefaultErrorJson{
Message: msg,
Code: http.StatusServiceUnavailable}
http2.WriteError(w, errJson)
return true
}
// IsOptimistic checks whether the beacon node is currently optimistic and writes it to the response.
func IsOptimistic(
ctx context.Context,
w http.ResponseWriter,
optimisticModeFetcher blockchain.OptimisticModeFetcher,
) (bool, error) {
isOptimistic, err := optimisticModeFetcher.IsOptimistic(ctx)
if err != nil {
errJson := &http2.DefaultErrorJson{
Message: "Could not check optimistic status: " + err.Error(),
Code: http.StatusInternalServerError,
}
http2.WriteError(w, errJson)
return true, err
}
if !isOptimistic {
return false, nil
}
errJson := &http2.DefaultErrorJson{
Code: http.StatusServiceUnavailable,
Message: "Beacon node is currently optimistic and not serving validators",
}
http2.WriteError(w, errJson)
return true, nil
}
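
Editor's note: a small sketch (not part of the diff) of how a handler might compose the shared helpers above; the handler name, route, and query parameters are hypothetical. ValidateUint and ValidateHex write a DefaultErrorJson response themselves, so the caller only needs to return early.

package example

import (
	"net/http"

	"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
	http2 "github.com/prysmaticlabs/prysm/v4/network/http"
)

// getEpochSummary is a hypothetical handler illustrating the validation helpers.
func getEpochSummary(w http.ResponseWriter, r *http.Request) {
	epoch, ok := shared.ValidateUint(w, "epoch", r.URL.Query().Get("epoch"))
	if !ok {
		return // error response already written by ValidateUint
	}
	if !shared.ValidateHex(w, "block_root", r.URL.Query().Get("block_root")) {
		return // error response already written by ValidateHex
	}
	http2.WriteJson(w, map[string]uint64{"epoch": epoch})
}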

View File

@@ -0,0 +1,311 @@
package shared
import (
"fmt"
"strconv"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/consensus-types/validator"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)
type Attestation struct {
AggregationBits string `json:"aggregation_bits" validate:"required,hexadecimal"`
Data *AttestationData `json:"data" validate:"required"`
Signature string `json:"signature" validate:"required,hexadecimal"`
}
type AttestationData struct {
Slot string `json:"slot" validate:"required,number,gte=0"`
CommitteeIndex string `json:"index" validate:"required,number,gte=0"`
BeaconBlockRoot string `json:"beacon_block_root" validate:"required,hexadecimal"`
Source *Checkpoint `json:"source" validate:"required"`
Target *Checkpoint `json:"target" validate:"required"`
}
type Checkpoint struct {
Epoch string `json:"epoch" validate:"required,number,gte=0"`
Root string `json:"root" validate:"required,hexadecimal"`
}
type SignedContributionAndProof struct {
Message *ContributionAndProof `json:"message" validate:"required"`
Signature string `json:"signature" validate:"required,hexadecimal"`
}
type ContributionAndProof struct {
AggregatorIndex string `json:"aggregator_index" validate:"required,number,gte=0"`
Contribution *SyncCommitteeContribution `json:"contribution" validate:"required"`
SelectionProof string `json:"selection_proof" validate:"required,hexadecimal"`
}
type SyncCommitteeContribution struct {
Slot string `json:"slot" validate:"required,number,gte=0"`
BeaconBlockRoot string `json:"beacon_block_root" hex:"true" validate:"required,hexadecimal"`
SubcommitteeIndex string `json:"subcommittee_index" validate:"required,number,gte=0"`
AggregationBits string `json:"aggregation_bits" hex:"true" validate:"required,hexadecimal"`
Signature string `json:"signature" hex:"true" validate:"required,hexadecimal"`
}
type SignedAggregateAttestationAndProof struct {
Message *AggregateAttestationAndProof `json:"message" validate:"required"`
Signature string `json:"signature" validate:"required,hexadecimal"`
}
type AggregateAttestationAndProof struct {
AggregatorIndex string `json:"aggregator_index" validate:"required,number,gte=0"`
Aggregate *Attestation `json:"aggregate" validate:"required"`
SelectionProof string `json:"selection_proof" validate:"required,hexadecimal"`
}
type SyncCommitteeSubscription struct {
ValidatorIndex string `json:"validator_index" validate:"required,number,gte=0"`
SyncCommitteeIndices []string `json:"sync_committee_indices" validate:"required,dive,number,gte=0"`
UntilEpoch string `json:"until_epoch" validate:"required,number,gte=0"`
}
type BeaconCommitteeSubscription struct {
ValidatorIndex string `json:"validator_index" validate:"required,number,gte=0"`
CommitteeIndex string `json:"committee_index" validate:"required,number,gte=0"`
CommitteesAtSlot string `json:"committees_at_slot" validate:"required,number,gte=0"`
Slot string `json:"slot" validate:"required,number,gte=0"`
IsAggregator bool `json:"is_aggregator"`
}
func (s *SignedContributionAndProof) ToConsensus() (*eth.SignedContributionAndProof, error) {
msg, err := s.Message.ToConsensus()
if err != nil {
return nil, NewDecodeError(err, "Message")
}
sig, err := hexutil.Decode(s.Signature)
if err != nil {
return nil, NewDecodeError(err, "Signature")
}
return &eth.SignedContributionAndProof{
Message: msg,
Signature: sig,
}, nil
}
func (c *ContributionAndProof) ToConsensus() (*eth.ContributionAndProof, error) {
contribution, err := c.Contribution.ToConsensus()
if err != nil {
return nil, NewDecodeError(err, "Contribution")
}
aggregatorIndex, err := strconv.ParseUint(c.AggregatorIndex, 10, 64)
if err != nil {
return nil, NewDecodeError(err, "AggregatorIndex")
}
selectionProof, err := hexutil.Decode(c.SelectionProof)
if err != nil {
return nil, NewDecodeError(err, "SelectionProof")
}
return &eth.ContributionAndProof{
AggregatorIndex: primitives.ValidatorIndex(aggregatorIndex),
Contribution: contribution,
SelectionProof: selectionProof,
}, nil
}
func (s *SyncCommitteeContribution) ToConsensus() (*eth.SyncCommitteeContribution, error) {
slot, err := strconv.ParseUint(s.Slot, 10, 64)
if err != nil {
return nil, NewDecodeError(err, "Slot")
}
bbRoot, err := hexutil.Decode(s.BeaconBlockRoot)
if err != nil {
return nil, NewDecodeError(err, "BeaconBlockRoot")
}
subcommitteeIndex, err := strconv.ParseUint(s.SubcommitteeIndex, 10, 64)
if err != nil {
return nil, NewDecodeError(err, "SubcommitteeIndex")
}
aggBits, err := hexutil.Decode(s.AggregationBits)
if err != nil {
return nil, NewDecodeError(err, "AggregationBits")
}
sig, err := hexutil.Decode(s.Signature)
if err != nil {
return nil, NewDecodeError(err, "Signature")
}
return &eth.SyncCommitteeContribution{
Slot: primitives.Slot(slot),
BlockRoot: bbRoot,
SubcommitteeIndex: subcommitteeIndex,
AggregationBits: aggBits,
Signature: sig,
}, nil
}
func (s *SignedAggregateAttestationAndProof) ToConsensus() (*eth.SignedAggregateAttestationAndProof, error) {
msg, err := s.Message.ToConsensus()
if err != nil {
return nil, NewDecodeError(err, "Message")
}
sig, err := hexutil.Decode(s.Signature)
if err != nil {
return nil, NewDecodeError(err, "Signature")
}
return &eth.SignedAggregateAttestationAndProof{
Message: msg,
Signature: sig,
}, nil
}
func (a *AggregateAttestationAndProof) ToConsensus() (*eth.AggregateAttestationAndProof, error) {
aggIndex, err := strconv.ParseUint(a.AggregatorIndex, 10, 64)
if err != nil {
return nil, NewDecodeError(err, "AggregatorIndex")
}
agg, err := a.Aggregate.ToConsensus()
if err != nil {
return nil, NewDecodeError(err, "Aggregate")
}
proof, err := hexutil.Decode(a.SelectionProof)
if err != nil {
return nil, NewDecodeError(err, "SelectionProof")
}
return &eth.AggregateAttestationAndProof{
AggregatorIndex: primitives.ValidatorIndex(aggIndex),
Aggregate: agg,
SelectionProof: proof,
}, nil
}
func (a *Attestation) ToConsensus() (*eth.Attestation, error) {
aggBits, err := hexutil.Decode(a.AggregationBits)
if err != nil {
return nil, NewDecodeError(err, "AggregationBits")
}
data, err := a.Data.ToConsensus()
if err != nil {
return nil, NewDecodeError(err, "Data")
}
sig, err := hexutil.Decode(a.Signature)
if err != nil {
return nil, NewDecodeError(err, "Signature")
}
return &eth.Attestation{
AggregationBits: aggBits,
Data: data,
Signature: sig,
}, nil
}
func (a *AttestationData) ToConsensus() (*eth.AttestationData, error) {
slot, err := strconv.ParseUint(a.Slot, 10, 64)
if err != nil {
return nil, NewDecodeError(err, "Slot")
}
committeeIndex, err := strconv.ParseUint(a.CommitteeIndex, 10, 64)
if err != nil {
return nil, NewDecodeError(err, "CommitteeIndex")
}
bbRoot, err := hexutil.Decode(a.BeaconBlockRoot)
if err != nil {
return nil, NewDecodeError(err, "BeaconBlockRoot")
}
source, err := a.Source.ToConsensus()
if err != nil {
return nil, NewDecodeError(err, "Source")
}
target, err := a.Target.ToConsensus()
if err != nil {
return nil, NewDecodeError(err, "Target")
}
return &eth.AttestationData{
Slot: primitives.Slot(slot),
CommitteeIndex: primitives.CommitteeIndex(committeeIndex),
BeaconBlockRoot: bbRoot,
Source: source,
Target: target,
}, nil
}
func (c *Checkpoint) ToConsensus() (*eth.Checkpoint, error) {
epoch, err := strconv.ParseUint(c.Epoch, 10, 64)
if err != nil {
return nil, NewDecodeError(err, "Epoch")
}
root, err := hexutil.Decode(c.Root)
if err != nil {
return nil, NewDecodeError(err, "Root")
}
return &eth.Checkpoint{
Epoch: primitives.Epoch(epoch),
Root: root,
}, nil
}
func (s *SyncCommitteeSubscription) ToConsensus() (*validator.SyncCommitteeSubscription, error) {
index, err := strconv.ParseUint(s.ValidatorIndex, 10, 64)
if err != nil {
return nil, NewDecodeError(err, "ValidatorIndex")
}
scIndices := make([]uint64, len(s.SyncCommitteeIndices))
for i, ix := range s.SyncCommitteeIndices {
scIndices[i], err = strconv.ParseUint(ix, 10, 64)
if err != nil {
return nil, NewDecodeError(err, fmt.Sprintf("SyncCommitteeIndices[%d]", i))
}
}
epoch, err := strconv.ParseUint(s.UntilEpoch, 10, 64)
if err != nil {
return nil, NewDecodeError(err, "UntilEpoch")
}
return &validator.SyncCommitteeSubscription{
ValidatorIndex: primitives.ValidatorIndex(index),
SyncCommitteeIndices: scIndices,
UntilEpoch: primitives.Epoch(epoch),
}, nil
}
func (b *BeaconCommitteeSubscription) ToConsensus() (*validator.BeaconCommitteeSubscription, error) {
valIndex, err := strconv.ParseUint(b.ValidatorIndex, 10, 64)
if err != nil {
return nil, NewDecodeError(err, "ValidatorIndex")
}
committeeIndex, err := strconv.ParseUint(b.CommitteeIndex, 10, 64)
if err != nil {
return nil, NewDecodeError(err, "CommitteeIndex")
}
committeesAtSlot, err := strconv.ParseUint(b.CommitteesAtSlot, 10, 64)
if err != nil {
return nil, NewDecodeError(err, "CommitteesAtSlot")
}
slot, err := strconv.ParseUint(b.Slot, 10, 64)
if err != nil {
return nil, NewDecodeError(err, "Slot")
}
return &validator.BeaconCommitteeSubscription{
ValidatorIndex: primitives.ValidatorIndex(valIndex),
CommitteeIndex: primitives.CommitteeIndex(committeeIndex),
CommitteesAtSlot: committeesAtSlot,
Slot: primitives.Slot(slot),
IsAggregator: b.IsAggregator,
}, nil
}
// SyncDetails contains information about node sync status.
type SyncDetails struct {
HeadSlot string `json:"head_slot"`
SyncDistance string `json:"sync_distance"`
IsSyncing bool `json:"is_syncing"`
IsOptimistic bool `json:"is_optimistic"`
ElOffline bool `json:"el_offline"`
}
// SyncDetailsContainer is a wrapper for Data.
type SyncDetailsContainer struct {
Data *SyncDetails `json:"data"`
}
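
All of the ToConsensus helpers above follow the same wire convention: numeric fields arrive as decimal strings, byte fields as 0x-prefixed hex strings, and every failure is wrapped in a decode error that names the offending field. A minimal standalone sketch of that pattern, assuming a hypothetical CheckpointJSON type and using go-ethereum's hexutil rather than the actual shared package:

```go
package main

import (
	"fmt"
	"strconv"

	"github.com/ethereum/go-ethereum/common/hexutil"
)

// CheckpointJSON mirrors the wire shape used by the shared structs:
// numbers are decimal strings, roots are 0x-prefixed hex strings.
type CheckpointJSON struct {
	Epoch string `json:"epoch"`
	Root  string `json:"root"`
}

// toConsensus shows the decode-and-wrap pattern: parse each field and
// report which field failed so the HTTP handler can surface it.
func (c *CheckpointJSON) toConsensus() (epoch uint64, root []byte, err error) {
	epoch, err = strconv.ParseUint(c.Epoch, 10, 64)
	if err != nil {
		return 0, nil, fmt.Errorf("Epoch: %w", err)
	}
	root, err = hexutil.Decode(c.Root)
	if err != nil {
		return 0, nil, fmt.Errorf("Root: %w", err)
	}
	return epoch, root, nil
}

func main() {
	cp := &CheckpointJSON{Epoch: "194048", Root: "0x" + fmt.Sprintf("%064x", 1)}
	epoch, root, err := cp.toConsensus()
	fmt.Println(epoch, len(root), err) // 194048 32 <nil>
}
```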

View File

@@ -3,22 +3,27 @@ load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"handlers.go",
"server.go",
"structs.go",
"validator.go",
],
importpath = "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/validator",
visibility = ["//beacon-chain:__subpackages__"],
visibility = ["//visibility:public"],
deps = [
"//beacon-chain/blockchain:go_default_library",
"//beacon-chain/builder:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/core/feed/operation:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/db:go_default_library",
"//beacon-chain/db/kv:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/synccommittee:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/rpc/core:go_default_library",
"//beacon-chain/rpc/eth/helpers:go_default_library",
"//beacon-chain/rpc/eth/shared:go_default_library",
"//beacon-chain/rpc/lookup:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
@@ -26,7 +31,9 @@ go_library(
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//consensus-types/validator:go_default_library",
"//encoding/bytesutil:go_default_library",
"//network/http:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/eth/v2:go_default_library",
"//proto/migration:go_default_library",
@@ -34,6 +41,7 @@ go_library(
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_go_playground_validator_v10//:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@io_bazel_rules_go//proto/wkt:empty_go_proto",
@@ -46,26 +54,36 @@ go_library(
go_test(
name = "go_default_test",
srcs = ["validator_test.go"],
srcs = [
"handlers_test.go",
"validator_test.go",
],
embed = [":go_default_library"],
deps = [
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/builder/testing:go_default_library",
"//beacon-chain/cache:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/core/transition:go_default_library",
"//beacon-chain/db/testing:go_default_library",
"//beacon-chain/forkchoice/doubly-linked-tree:go_default_library",
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/synccommittee:go_default_library",
"//beacon-chain/p2p/testing:go_default_library",
"//beacon-chain/rpc/core:go_default_library",
"//beacon-chain/rpc/eth/shared:go_default_library",
"//beacon-chain/rpc/prysm/v1alpha1/validator:go_default_library",
"//beacon-chain/rpc/testutil:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//beacon-chain/state/stategen:go_default_library",
"//beacon-chain/sync/initial-sync/testing:go_default_library",
"//config/fieldparams:go_default_library",
"//config/params:go_default_library",
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//network/http:go_default_library",
"//proto/eth/v1:go_default_library",
"//proto/eth/v2:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
@@ -75,9 +93,8 @@ go_test(
"//testing/util:go_default_library",
"//time/slots:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_ethereum_go_ethereum//common/hexutil:go_default_library",
"@com_github_golang_mock//gomock:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@com_github_sirupsen_logrus//hooks/test:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)

View File

@@ -0,0 +1,453 @@
package validator
import (
"bytes"
"encoding/json"
"fmt"
"net/http"
"strconv"
"time"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/go-playground/validator/v10"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
rpchelpers "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
validator2 "github.com/prysmaticlabs/prysm/v4/consensus-types/validator"
http2 "github.com/prysmaticlabs/prysm/v4/network/http"
ethpbalpha "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"go.opencensus.io/trace"
)
// GetAggregateAttestation aggregates all attestations matching the given attestation data root and slot, returning the aggregated result.
func (s *Server) GetAggregateAttestation(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "validator.GetAggregateAttestation")
defer span.End()
attDataRoot := r.URL.Query().Get("attestation_data_root")
valid := shared.ValidateHex(w, "Attestation data root", attDataRoot)
if !valid {
return
}
rawSlot := r.URL.Query().Get("slot")
slot, valid := shared.ValidateUint(w, "Slot", rawSlot)
if !valid {
return
}
if err := s.AttestationsPool.AggregateUnaggregatedAttestations(ctx); err != nil {
http2.HandleError(w, "Could not aggregate unaggregated attestations: "+err.Error(), http.StatusBadRequest)
return
}
allAtts := s.AttestationsPool.AggregatedAttestations()
var bestMatchingAtt *ethpbalpha.Attestation
for _, att := range allAtts {
if att.Data.Slot == primitives.Slot(slot) {
root, err := att.Data.HashTreeRoot()
if err != nil {
http2.HandleError(w, "Could not get attestation data root: "+err.Error(), http.StatusInternalServerError)
return
}
attDataRootBytes, err := hexutil.Decode(attDataRoot)
if err != nil {
http2.HandleError(w, "Could not decode attestation data root into bytes: "+err.Error(), http.StatusBadRequest)
return
}
if bytes.Equal(root[:], attDataRootBytes) {
if bestMatchingAtt == nil || len(att.AggregationBits) > len(bestMatchingAtt.AggregationBits) {
bestMatchingAtt = att
}
}
}
}
if bestMatchingAtt == nil {
http2.HandleError(w, "No matching attestation found", http.StatusNotFound)
return
}
response := &AggregateAttestationResponse{
Data: &shared.Attestation{
AggregationBits: hexutil.Encode(bestMatchingAtt.AggregationBits),
Data: &shared.AttestationData{
Slot: strconv.FormatUint(uint64(bestMatchingAtt.Data.Slot), 10),
CommitteeIndex: strconv.FormatUint(uint64(bestMatchingAtt.Data.CommitteeIndex), 10),
BeaconBlockRoot: hexutil.Encode(bestMatchingAtt.Data.BeaconBlockRoot),
Source: &shared.Checkpoint{
Epoch: strconv.FormatUint(uint64(bestMatchingAtt.Data.Source.Epoch), 10),
Root: hexutil.Encode(bestMatchingAtt.Data.Source.Root),
},
Target: &shared.Checkpoint{
Epoch: strconv.FormatUint(uint64(bestMatchingAtt.Data.Target.Epoch), 10),
Root: hexutil.Encode(bestMatchingAtt.Data.Target.Root),
},
},
Signature: hexutil.Encode(bestMatchingAtt.Signature),
}}
http2.WriteJson(w, response)
}
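
From a client's point of view, the handler above takes two query parameters and returns the best-known aggregate as JSON. A hedged sketch of calling it with the standard library; the /eth/v1/validator/aggregate_attestation route and the local port are assumptions, not taken from this diff:

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	// Query parameters match the handler above: attestation_data_root is a
	// 0x-prefixed 32-byte root, slot is a decimal string.
	q := url.Values{}
	q.Set("attestation_data_root", "0x"+fmt.Sprintf("%064x", 0))
	q.Set("slot", "7654321")

	// Route and port are assumed for illustration.
	resp, err := http.Get("http://localhost:3500/eth/v1/validator/aggregate_attestation?" + q.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	// 200 with {"data": {...}} on success, 404 if no matching aggregate is known.
	fmt.Println(resp.StatusCode, string(body))
}
```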
// SubmitContributionAndProofs publishes multiple signed sync committee contribution and proof messages.
func (s *Server) SubmitContributionAndProofs(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "validator.SubmitContributionAndProofs")
defer span.End()
if r.Body == http.NoBody {
http2.HandleError(w, "No data submitted", http.StatusBadRequest)
return
}
var req SubmitContributionAndProofsRequest
if err := json.NewDecoder(r.Body).Decode(&req.Data); err != nil {
http2.HandleError(w, "Could not decode request body: "+err.Error(), http.StatusBadRequest)
return
}
if len(req.Data) == 0 {
http2.HandleError(w, "No data submitted", http.StatusBadRequest)
return
}
validate := validator.New()
if err := validate.Struct(req); err != nil {
http2.HandleError(w, err.Error(), http.StatusBadRequest)
return
}
for _, item := range req.Data {
consensusItem, err := item.ToConsensus()
if err != nil {
http2.HandleError(w, "Could not convert request contribution to consensus contribution: "+err.Error(), http.StatusBadRequest)
return
}
rpcError := s.CoreService.SubmitSignedContributionAndProof(ctx, consensusItem)
if rpcError != nil {
http2.HandleError(w, rpcError.Err.Error(), core.ErrorReasonToHTTP(rpcError.Reason))
}
}
}
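
Note that the handler decodes the request body straight into req.Data, so the payload is a bare JSON array of signed contribution-and-proof objects rather than an object wrapping them. A minimal sketch that exercises the empty-array rejection path; the route and port are assumptions, not taken from this diff:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// An empty array is rejected with 400 "No data submitted", which makes a
	// convenient smoke test against a local beacon node.
	resp, err := http.Post(
		"http://localhost:3500/eth/v1/validator/contribution_and_proofs", // assumed route
		"application/json",
		bytes.NewBufferString(`[]`),
	)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.StatusCode, string(body)) // expect 400 for an empty array
}
```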
// SubmitAggregateAndProofs verifies the given aggregates and proofs and publishes them on the appropriate gossipsub topic.
func (s *Server) SubmitAggregateAndProofs(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "validator.SubmitAggregateAndProofs")
defer span.End()
if r.Body == http.NoBody {
http2.HandleError(w, "No data submitted", http.StatusBadRequest)
return
}
var req SubmitAggregateAndProofsRequest
if err := json.NewDecoder(r.Body).Decode(&req.Data); err != nil {
http2.HandleError(w, "Could not decode request body: "+err.Error(), http.StatusBadRequest)
return
}
if len(req.Data) == 0 {
http2.HandleError(w, "No data submitted", http.StatusBadRequest)
return
}
validate := validator.New()
if err := validate.Struct(req); err != nil {
http2.HandleError(w, err.Error(), http.StatusBadRequest)
return
}
broadcastFailed := false
for _, item := range req.Data {
consensusItem, err := item.ToConsensus()
if err != nil {
http2.HandleError(w, "Could not convert request aggregate to consensus aggregate: "+err.Error(), http.StatusBadRequest)
return
}
rpcError := s.CoreService.SubmitSignedAggregateSelectionProof(
ctx,
&ethpbalpha.SignedAggregateSubmitRequest{SignedAggregateAndProof: consensusItem},
)
if rpcError != nil {
_, ok := rpcError.Err.(*core.AggregateBroadcastFailedError)
if ok {
broadcastFailed = true
} else {
http2.HandleError(w, rpcError.Err.Error(), core.ErrorReasonToHTTP(rpcError.Reason))
return
}
}
}
if broadcastFailed {
http2.HandleError(w, "Could not broadcast one or more signed aggregated attestations", http.StatusInternalServerError)
}
}
// SubmitSyncCommitteeSubscription subscribes to a number of sync committee subnets.
//
// Subscribing to sync committee subnets is an action performed by the validator client (VC)
// to enable network participation, and is only required if the VC has an active
// validator in an active sync committee.
func (s *Server) SubmitSyncCommitteeSubscription(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "validator.SubmitSyncCommitteeSubscription")
defer span.End()
if shared.IsSyncing(ctx, w, s.SyncChecker, s.HeadFetcher, s.TimeFetcher, s.OptimisticModeFetcher) {
return
}
if r.Body == http.NoBody {
http2.HandleError(w, "No data submitted", http.StatusBadRequest)
return
}
var req SubmitSyncCommitteeSubscriptionsRequest
if err := json.NewDecoder(r.Body).Decode(&req.Data); err != nil {
http2.HandleError(w, "Could not decode request body: "+err.Error(), http.StatusBadRequest)
return
}
if len(req.Data) == 0 {
http2.HandleError(w, "No data submitted", http.StatusBadRequest)
return
}
validate := validator.New()
if err := validate.Struct(req); err != nil {
http2.HandleError(w, err.Error(), http.StatusBadRequest)
return
}
st, err := s.HeadFetcher.HeadStateReadOnly(ctx)
if err != nil {
http2.HandleError(w, "Could not get head state: "+err.Error(), http.StatusInternalServerError)
return
}
currEpoch := slots.ToEpoch(st.Slot())
validators := make([]state.ReadOnlyValidator, len(req.Data))
subscriptions := make([]*validator2.SyncCommitteeSubscription, len(req.Data))
for i, item := range req.Data {
consensusItem, err := item.ToConsensus()
if err != nil {
http2.HandleError(w, "Could not convert request subscription to consensus subscription: "+err.Error(), http.StatusBadRequest)
return
}
subscriptions[i] = consensusItem
val, err := st.ValidatorAtIndexReadOnly(consensusItem.ValidatorIndex)
if err != nil {
http2.HandleError(
w,
fmt.Sprintf("Could not get validator at index %d: %s", consensusItem.ValidatorIndex, err.Error()),
http.StatusInternalServerError,
)
return
}
valStatus, err := rpchelpers.ValidatorSubStatus(val, currEpoch)
if err != nil {
http2.HandleError(
w,
fmt.Sprintf("Could not get validator status at index %d: %s", consensusItem.ValidatorIndex, err.Error()),
http.StatusInternalServerError,
)
return
}
if valStatus != validator2.ActiveOngoing && valStatus != validator2.ActiveExiting {
http2.HandleError(
w,
fmt.Sprintf("Validator at index %d is not active or exiting", consensusItem.ValidatorIndex),
http.StatusBadRequest,
)
return
}
validators[i] = val
}
startEpoch, err := slots.SyncCommitteePeriodStartEpoch(currEpoch)
if err != nil {
http2.HandleError(w, "Could not get sync committee period start epoch: "+err.Error(), http.StatusInternalServerError)
return
}
for i, sub := range subscriptions {
if sub.UntilEpoch <= currEpoch {
http2.HandleError(
w,
fmt.Sprintf("Epoch for subscription at index %d is in the past. It must be at least %d", i, currEpoch+1),
http.StatusBadRequest,
)
return
}
maxValidUntilEpoch := startEpoch + params.BeaconConfig().EpochsPerSyncCommitteePeriod*2
if sub.UntilEpoch > maxValidUntilEpoch {
http2.HandleError(
w,
fmt.Sprintf("Epoch for subscription at index %d is too far in the future. It can be at most %d", i, maxValidUntilEpoch),
http.StatusBadRequest,
)
return
}
}
for i, sub := range subscriptions {
pubkey48 := validators[i].PublicKey()
// Handle underflow in the event the subscription's end epoch is before the sync committee
// period start epoch. The validation above makes this impossible, so it is a defensive check.
epochsToWatch, err := sub.UntilEpoch.SafeSub(uint64(startEpoch))
if err != nil {
epochsToWatch = 0
}
epochDuration := time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot)) * time.Second
totalDuration := epochDuration * time.Duration(epochsToWatch)
cache.SyncSubnetIDs.AddSyncCommitteeSubnets(pubkey48[:], startEpoch, sub.SyncCommitteeIndices, totalDuration)
}
}
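
The subscription is kept alive for UntilEpoch minus the sync committee period start epoch, with each epoch lasting SlotsPerEpoch × SecondsPerSlot seconds. A small standalone sketch of that arithmetic, assuming mainnet defaults of 32 slots per epoch and 12 seconds per slot:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Mainnet defaults are assumed here: 32 slots per epoch, 12 seconds per slot.
	const (
		slotsPerEpoch  = 32
		secondsPerSlot = 12
	)
	startEpoch := uint64(194048) // sync committee period start epoch
	untilEpoch := uint64(194304) // subscription end epoch from the request

	// Mirror of the handler's defensive SafeSub: watch zero epochs on underflow.
	epochsToWatch := uint64(0)
	if untilEpoch > startEpoch {
		epochsToWatch = untilEpoch - startEpoch
	}

	epochDuration := time.Duration(slotsPerEpoch*secondsPerSlot) * time.Second // 384s per epoch
	totalDuration := epochDuration * time.Duration(epochsToWatch)
	fmt.Println(totalDuration) // 256 epochs * 384s = 27h18m24s
}
```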
// SubmitBeaconCommitteeSubscription searches using discv5 for peers related to the provided subnet information
// and replaces current peers with them if necessary.
func (s *Server) SubmitBeaconCommitteeSubscription(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "validator.SubmitBeaconCommitteeSubscription")
defer span.End()
if shared.IsSyncing(ctx, w, s.SyncChecker, s.HeadFetcher, s.TimeFetcher, s.OptimisticModeFetcher) {
return
}
if r.Body == http.NoBody {
http2.HandleError(w, "No data submitted", http.StatusBadRequest)
return
}
var req SubmitBeaconCommitteeSubscriptionsRequest
if err := json.NewDecoder(r.Body).Decode(&req.Data); err != nil {
http2.HandleError(w, "Could not decode request body: "+err.Error(), http.StatusBadRequest)
return
}
if len(req.Data) == 0 {
http2.HandleError(w, "No data submitted", http.StatusBadRequest)
return
}
validate := validator.New()
if err := validate.Struct(req); err != nil {
http2.HandleError(w, err.Error(), http.StatusBadRequest)
return
}
st, err := s.HeadFetcher.HeadStateReadOnly(ctx)
if err != nil {
http2.HandleError(w, "Could not get head state: "+err.Error(), http.StatusInternalServerError)
return
}
// Verify validators at the beginning to return early if request is invalid.
validators := make([]state.ReadOnlyValidator, len(req.Data))
subscriptions := make([]*validator2.BeaconCommitteeSubscription, len(req.Data))
for i, item := range req.Data {
consensusItem, err := item.ToConsensus()
if err != nil {
http2.HandleError(w, "Could not convert request subscription to consensus subscription: "+err.Error(), http.StatusBadRequest)
return
}
subscriptions[i] = consensusItem
val, err := st.ValidatorAtIndexReadOnly(consensusItem.ValidatorIndex)
if err != nil {
if outOfRangeErr, ok := err.(*state_native.ValidatorIndexOutOfRangeError); ok {
http2.HandleError(w, "Could not get validator: "+outOfRangeErr.Error(), http.StatusBadRequest)
return
}
http2.HandleError(w, "Could not get validator: "+err.Error(), http.StatusInternalServerError)
return
}
validators[i] = val
}
fetchValsLen := func(slot primitives.Slot) (uint64, error) {
wantedEpoch := slots.ToEpoch(slot)
vals, err := s.HeadFetcher.HeadValidatorsIndices(ctx, wantedEpoch)
if err != nil {
return 0, err
}
return uint64(len(vals)), nil
}
// Request the head validator indices of the epoch represented by the first requested slot.
currValsLen, err := fetchValsLen(subscriptions[0].Slot)
if err != nil {
http2.HandleError(w, "Could not retrieve head validator length: "+err.Error(), http.StatusInternalServerError)
return
}
currEpoch := slots.ToEpoch(subscriptions[0].Slot)
for _, sub := range subscriptions {
// If the epoch has changed, re-request the active validator count
if currEpoch != slots.ToEpoch(sub.Slot) {
currValsLen, err = fetchValsLen(sub.Slot)
if err != nil {
http2.HandleError(w, "Could not retrieve head validator length: "+err.Error(), http.StatusInternalServerError)
return
}
currEpoch = slots.ToEpoch(sub.Slot)
}
subnet := helpers.ComputeSubnetFromCommitteeAndSlot(currValsLen, sub.CommitteeIndex, sub.Slot)
cache.SubnetIDs.AddAttesterSubnetID(sub.Slot, subnet)
if sub.IsAggregator {
cache.SubnetIDs.AddAggregatorSubnetID(sub.Slot, subnet)
}
}
for _, val := range validators {
valStatus, err := rpchelpers.ValidatorStatus(val, currEpoch)
if err != nil {
http2.HandleError(w, "Could not retrieve validator status: "+err.Error(), http.StatusInternalServerError)
return
}
pubkey := val.PublicKey()
core.AssignValidatorToSubnet(pubkey[:], valStatus)
}
}
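
ComputeSubnetFromCommitteeAndSlot maps the active validator count, committee index, and slot to one of the attestation subnets. A hedged standalone sketch of the consensus-spec computation it corresponds to (compute_subnet_for_attestation), assuming mainnet constants; the real helper lives in beacon-chain/core/helpers and may differ in detail:

```go
package main

import "fmt"

// Mainnet constants, assumed for illustration.
const (
	slotsPerEpoch          = 32
	maxCommitteesPerSlot   = 64
	targetCommitteeSize    = 128
	attestationSubnetCount = 64
)

// committeesPerSlot follows the spec's get_committee_count_per_slot:
// clamp active/SLOTS_PER_EPOCH/TARGET_COMMITTEE_SIZE to [1, MAX_COMMITTEES_PER_SLOT].
func committeesPerSlot(activeValidators uint64) uint64 {
	c := activeValidators / slotsPerEpoch / targetCommitteeSize
	if c > maxCommitteesPerSlot {
		c = maxCommitteesPerSlot
	}
	if c == 0 {
		c = 1
	}
	return c
}

// subnetForAttestation mirrors the spec's compute_subnet_for_attestation.
func subnetForAttestation(activeValidators, committeeIndex, slot uint64) uint64 {
	slotsSinceEpochStart := slot % slotsPerEpoch
	committeesSinceEpochStart := committeesPerSlot(activeValidators) * slotsSinceEpochStart
	return (committeesSinceEpochStart + committeeIndex) % attestationSubnetCount
}

func main() {
	// With ~500k active validators there are 64 committees per slot,
	// so the subnet rotates through all 64 subnets within an epoch.
	fmt.Println(subnetForAttestation(500_000, 17, 7654321))
}
```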
// GetAttestationData requests that the beacon node produce attestation data for
// the requested committee index and slot based on the node's current head.
func (s *Server) GetAttestationData(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), "validator.GetAttestationData")
defer span.End()
if shared.IsSyncing(ctx, w, s.SyncChecker, s.HeadFetcher, s.TimeFetcher, s.OptimisticModeFetcher) {
return
}
if isOptimistic, err := shared.IsOptimistic(ctx, w, s.OptimisticModeFetcher); isOptimistic || err != nil {
return
}
rawSlot := r.URL.Query().Get("slot")
slot, valid := shared.ValidateUint(w, "Slot", rawSlot)
if !valid {
return
}
rawCommitteeIndex := r.URL.Query().Get("committee_index")
committeeIndex, valid := shared.ValidateUint(w, "Committee Index", rawCommitteeIndex)
if !valid {
return
}
attestationData, rpcError := s.CoreService.GetAttestationData(ctx, &ethpbalpha.AttestationDataRequest{
Slot: primitives.Slot(slot),
CommitteeIndex: primitives.CommitteeIndex(committeeIndex),
})
if rpcError != nil {
http2.HandleError(w, rpcError.Err.Error(), core.ErrorReasonToHTTP(rpcError.Reason))
return
}
response := &GetAttestationDataResponse{
Data: &shared.AttestationData{
Slot: strconv.FormatUint(uint64(attestationData.Slot), 10),
CommitteeIndex: strconv.FormatUint(uint64(attestationData.CommitteeIndex), 10),
BeaconBlockRoot: hexutil.Encode(attestationData.BeaconBlockRoot),
Source: &shared.Checkpoint{
Epoch: strconv.FormatUint(uint64(attestationData.Source.Epoch), 10),
Root: hexutil.Encode(attestationData.Source.Root),
},
Target: &shared.Checkpoint{
Epoch: strconv.FormatUint(uint64(attestationData.Target.Epoch), 10),
Root: hexutil.Encode(attestationData.Target.Root),
},
},
}
http2.WriteJson(w, response)
}
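
The HTTP handler is a thin wrapper over CoreService.GetAttestationData: two query parameters in, a JSON envelope out. A hedged client sketch; the /eth/v1/validator/attestation_data route and the port are assumptions, and only the top-level data key from GetAttestationDataResponse is relied on:

```go
package main

import (
	"encoding/json"
	"fmt"
	"net/http"
	"net/url"
)

func main() {
	// Query parameters match the handler: decimal strings for slot and committee_index.
	q := url.Values{}
	q.Set("slot", "7654321")
	q.Set("committee_index", "3")

	resp, err := http.Get("http://localhost:3500/eth/v1/validator/attestation_data?" + q.Encode()) // assumed route
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	// Decode only the response envelope; the inner object follows the shared.AttestationData shape.
	var out struct {
		Data json.RawMessage `json:"data"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(resp.StatusCode, string(out.Data))
}
```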

File diff suppressed because it is too large

View File

@@ -4,10 +4,12 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/builder"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/operation"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/synccommittee"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/lookup"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
@@ -30,4 +32,6 @@ type Server struct {
ChainInfoFetcher blockchain.ChainInfoFetcher
BeaconDB db.HeadAccessDatabase
BlockBuilder builder.BlockBuilder
OperationNotifier operation.Notifier
CoreService *core.Service
}

View File

@@ -0,0 +1,27 @@
package validator
import "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/shared"
type AggregateAttestationResponse struct {
Data *shared.Attestation `json:"data"`
}
type SubmitContributionAndProofsRequest struct {
Data []*shared.SignedContributionAndProof `json:"data" validate:"required,dive"`
}
type SubmitAggregateAndProofsRequest struct {
Data []*shared.SignedAggregateAttestationAndProof `json:"data" validate:"required,dive"`
}
type SubmitSyncCommitteeSubscriptionsRequest struct {
Data []*shared.SyncCommitteeSubscription `json:"data" validate:"required,dive"`
}
type SubmitBeaconCommitteeSubscriptionsRequest struct {
Data []*shared.BeaconCommitteeSubscription `json:"data" validate:"required,dive"`
}
type GetAttestationDataResponse struct {
Data *shared.AttestationData `json:"data"`
}
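
The required,dive tags on these request wrappers are what the handlers' validate.Struct calls enforce: the slice itself must be present, and dive descends into each element so its own tags are checked too. A minimal sketch with a hypothetical item type, assuming the go-playground/validator v10 behavior used here:

```go
package main

import (
	"fmt"

	"github.com/go-playground/validator/v10"
)

// item is a hypothetical element type standing in for the shared request structs.
type item struct {
	Signature string `validate:"required"`
}

type request struct {
	Data []*item `json:"data" validate:"required,dive"`
}

func main() {
	validate := validator.New()

	// Missing slice -> fails the "required" rule on Data.
	fmt.Println(validate.Struct(request{}) != nil) // true

	// Element with an empty Signature -> fails the element-level rule via "dive".
	req := request{Data: []*item{{Signature: ""}}}
	fmt.Println(validate.Struct(req) != nil) // true

	// Fully populated element passes.
	ok := request{Data: []*item{{Signature: "0xabc"}}}
	fmt.Println(validate.Struct(ok)) // <nil>
}
```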

View File

@@ -6,19 +6,16 @@ import (
"fmt"
"sort"
"strconv"
"time"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/common/hexutil"
"github.com/golang/protobuf/ptypes/empty"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/builder"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db/kv"
rpchelpers "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
@@ -783,261 +780,6 @@ func (vs *Server) SubmitValidatorRegistration(ctx context.Context, reg *ethpbv1.
return &empty.Empty{}, nil
}
// ProduceAttestationData requests that the beacon node produce attestation data for
// the requested committee index and slot based on the node's current head.
func (vs *Server) ProduceAttestationData(ctx context.Context, req *ethpbv1.ProduceAttestationDataRequest) (*ethpbv1.ProduceAttestationDataResponse, error) {
ctx, span := trace.StartSpan(ctx, "validator.ProduceAttestationData")
defer span.End()
v1alpha1req := &ethpbalpha.AttestationDataRequest{
Slot: req.Slot,
CommitteeIndex: req.CommitteeIndex,
}
v1alpha1resp, err := vs.V1Alpha1Server.GetAttestationData(ctx, v1alpha1req)
if err != nil {
// We simply return err because it's already a gRPC error type.
return nil, err
}
attData := migration.V1Alpha1AttDataToV1(v1alpha1resp)
return &ethpbv1.ProduceAttestationDataResponse{Data: attData}, nil
}
// GetAggregateAttestation aggregates all attestations matching the given attestation data root and slot, returning the aggregated result.
func (vs *Server) GetAggregateAttestation(ctx context.Context, req *ethpbv1.AggregateAttestationRequest) (*ethpbv1.AggregateAttestationResponse, error) {
_, span := trace.StartSpan(ctx, "validator.GetAggregateAttestation")
defer span.End()
if err := vs.AttestationsPool.AggregateUnaggregatedAttestations(ctx); err != nil {
return nil, status.Errorf(codes.Internal, "Could not aggregate unaggregated attestations: %v", err)
}
allAtts := vs.AttestationsPool.AggregatedAttestations()
var bestMatchingAtt *ethpbalpha.Attestation
for _, att := range allAtts {
if att.Data.Slot == req.Slot {
root, err := att.Data.HashTreeRoot()
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get attestation data root: %v", err)
}
if bytes.Equal(root[:], req.AttestationDataRoot) {
if bestMatchingAtt == nil || len(att.AggregationBits) > len(bestMatchingAtt.AggregationBits) {
bestMatchingAtt = att
}
}
}
}
if bestMatchingAtt == nil {
return nil, status.Error(codes.NotFound, "No matching attestation found")
}
return &ethpbv1.AggregateAttestationResponse{
Data: migration.V1Alpha1AttestationToV1(bestMatchingAtt),
}, nil
}
// SubmitAggregateAndProofs verifies the given aggregates and proofs and publishes them on the appropriate gossipsub topic.
func (vs *Server) SubmitAggregateAndProofs(ctx context.Context, req *ethpbv1.SubmitAggregateAndProofsRequest) (*empty.Empty, error) {
ctx, span := trace.StartSpan(ctx, "validator.SubmitAggregateAndProofs")
defer span.End()
for _, agg := range req.Data {
if agg == nil || agg.Message == nil || agg.Message.Aggregate == nil || agg.Message.Aggregate.Data == nil {
return nil, status.Error(codes.InvalidArgument, "Signed aggregate request can't be nil")
}
sigLen := fieldparams.BLSSignatureLength
emptySig := make([]byte, sigLen)
if bytes.Equal(agg.Signature, emptySig) || bytes.Equal(agg.Message.SelectionProof, emptySig) || bytes.Equal(agg.Message.Aggregate.Signature, emptySig) {
return nil, status.Error(codes.InvalidArgument, "Signed signatures can't be zero hashes")
}
if len(agg.Signature) != sigLen || len(agg.Message.Aggregate.Signature) != sigLen {
return nil, status.Errorf(codes.InvalidArgument, "Incorrect signature length. Expected %d bytes", sigLen)
}
// As a preventive measure, a beacon node shouldn't broadcast an attestation whose slot is out of range.
if err := helpers.ValidateAttestationTime(agg.Message.Aggregate.Data.Slot,
vs.TimeFetcher.GenesisTime(), params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
return nil, status.Error(codes.InvalidArgument, "Attestation slot is no longer valid from current time")
}
}
broadcastFailed := false
for _, agg := range req.Data {
v1alpha1Agg := migration.V1SignedAggregateAttAndProofToV1Alpha1(agg)
if err := vs.Broadcaster.Broadcast(ctx, v1alpha1Agg); err != nil {
broadcastFailed = true
} else {
log.WithFields(log.Fields{
"slot": agg.Message.Aggregate.Data.Slot,
"committeeIndex": agg.Message.Aggregate.Data.Index,
"validatorIndex": agg.Message.AggregatorIndex,
"aggregatedCount": agg.Message.Aggregate.AggregationBits.Count(),
}).Debug("Broadcasting aggregated attestation and proof")
}
}
if broadcastFailed {
return nil, status.Errorf(
codes.Internal,
"Could not broadcast one or more signed aggregated attestations")
}
return &emptypb.Empty{}, nil
}
// SubmitBeaconCommitteeSubscription searches using discv5 for peers related to the provided subnet information
// and replaces current peers with them if necessary.
func (vs *Server) SubmitBeaconCommitteeSubscription(ctx context.Context, req *ethpbv1.SubmitBeaconCommitteeSubscriptionsRequest) (*emptypb.Empty, error) {
ctx, span := trace.StartSpan(ctx, "validator.SubmitBeaconCommitteeSubscription")
defer span.End()
if err := rpchelpers.ValidateSyncGRPC(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}
if len(req.Data) == 0 {
return nil, status.Error(codes.InvalidArgument, "No subscriptions provided")
}
s, err := vs.HeadFetcher.HeadStateReadOnly(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get head state: %v", err)
}
// Verify validators at the beginning to return early if request is invalid.
validators := make([]state.ReadOnlyValidator, len(req.Data))
for i, sub := range req.Data {
val, err := s.ValidatorAtIndexReadOnly(sub.ValidatorIndex)
if outOfRangeErr, ok := err.(*state_native.ValidatorIndexOutOfRangeError); ok {
return nil, status.Errorf(codes.InvalidArgument, "Invalid validator ID: %v", outOfRangeErr)
}
validators[i] = val
}
fetchValsLen := func(slot primitives.Slot) (uint64, error) {
wantedEpoch := slots.ToEpoch(slot)
vals, err := vs.HeadFetcher.HeadValidatorsIndices(ctx, wantedEpoch)
if err != nil {
return 0, err
}
return uint64(len(vals)), nil
}
// Request the head validator indices of the epoch represented by the first requested slot.
currValsLen, err := fetchValsLen(req.Data[0].Slot)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not retrieve head validator length: %v", err)
}
currEpoch := slots.ToEpoch(req.Data[0].Slot)
for _, sub := range req.Data {
// If the epoch has changed, re-request the active validator count
if currEpoch != slots.ToEpoch(sub.Slot) {
currValsLen, err = fetchValsLen(sub.Slot)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not retrieve head validator length: %v", err)
}
currEpoch = slots.ToEpoch(sub.Slot)
}
subnet := helpers.ComputeSubnetFromCommitteeAndSlot(currValsLen, sub.CommitteeIndex, sub.Slot)
cache.SubnetIDs.AddAttesterSubnetID(sub.Slot, subnet)
if sub.IsAggregator {
cache.SubnetIDs.AddAggregatorSubnetID(sub.Slot, subnet)
}
}
for _, val := range validators {
valStatus, err := rpchelpers.ValidatorStatus(val, currEpoch)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not retrieve validator status: %v", err)
}
pubkey := val.PublicKey()
v1alpha1Req := &ethpbalpha.AssignValidatorToSubnetRequest{PublicKey: pubkey[:], Status: v1ValidatorStatusToV1Alpha1(valStatus)}
_, err = vs.V1Alpha1Server.AssignValidatorToSubnet(ctx, v1alpha1Req)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not assign validator to subnet")
}
}
return &emptypb.Empty{}, nil
}
// SubmitSyncCommitteeSubscription subscribes to a number of sync committee subnets.
//
// Subscribing to sync committee subnets is an action performed by the validator client (VC)
// to enable network participation in Altair networks, and is only required if the VC has an active
// validator in an active sync committee.
func (vs *Server) SubmitSyncCommitteeSubscription(ctx context.Context, req *ethpbv2.SubmitSyncCommitteeSubscriptionsRequest) (*empty.Empty, error) {
ctx, span := trace.StartSpan(ctx, "validator.SubmitSyncCommitteeSubscription")
defer span.End()
if err := rpchelpers.ValidateSyncGRPC(ctx, vs.SyncChecker, vs.HeadFetcher, vs.TimeFetcher, vs.OptimisticModeFetcher); err != nil {
// We simply return the error because it's already a gRPC error.
return nil, err
}
if len(req.Data) == 0 {
return nil, status.Error(codes.InvalidArgument, "No subscriptions provided")
}
s, err := vs.HeadFetcher.HeadStateReadOnly(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get head state: %v", err)
}
currEpoch := slots.ToEpoch(s.Slot())
validators := make([]state.ReadOnlyValidator, len(req.Data))
for i, sub := range req.Data {
val, err := s.ValidatorAtIndexReadOnly(sub.ValidatorIndex)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get validator at index %d: %v", sub.ValidatorIndex, err)
}
valStatus, err := rpchelpers.ValidatorSubStatus(val, currEpoch)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get validator status at index %d: %v", sub.ValidatorIndex, err)
}
if valStatus != ethpbv1.ValidatorStatus_ACTIVE_ONGOING && valStatus != ethpbv1.ValidatorStatus_ACTIVE_EXITING {
return nil, status.Errorf(codes.InvalidArgument, "Validator at index %d is not active or exiting: %v", sub.ValidatorIndex, err)
}
validators[i] = val
}
startEpoch, err := slots.SyncCommitteePeriodStartEpoch(currEpoch)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get sync committee period start epoch: %v.", err)
}
for i, sub := range req.Data {
if sub.UntilEpoch <= currEpoch {
return nil, status.Errorf(codes.InvalidArgument, "Epoch for subscription at index %d is in the past. It must be at least %d", i, currEpoch+1)
}
maxValidUntilEpoch := startEpoch + params.BeaconConfig().EpochsPerSyncCommitteePeriod*2
if sub.UntilEpoch > maxValidUntilEpoch {
return nil, status.Errorf(
codes.InvalidArgument,
"Epoch for subscription at index %d is too far in the future. It can be at most %d",
i,
maxValidUntilEpoch,
)
}
}
for i, sub := range req.Data {
pubkey48 := validators[i].PublicKey()
// Handle underflow in the event the subscription's end epoch is before the sync committee
// period start epoch. The validation above makes this impossible, so it is a defensive check.
epochsToWatch, err := sub.UntilEpoch.SafeSub(uint64(startEpoch))
if err != nil {
epochsToWatch = 0
}
epochDuration := time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot)) * time.Second
totalDuration := epochDuration * time.Duration(epochsToWatch)
cache.SyncSubnetIDs.AddSyncCommitteeSubnets(pubkey48[:], startEpoch, sub.SyncCommitteeIndices, totalDuration)
}
return &empty.Empty{}, nil
}
// ProduceSyncCommitteeContribution requests that the beacon node produce a sync committee contribution.
func (vs *Server) ProduceSyncCommitteeContribution(
ctx context.Context,
@@ -1076,36 +818,6 @@ func (vs *Server) ProduceSyncCommitteeContribution(
}, nil
}
// SubmitContributionAndProofs publishes multiple signed sync committee contribution and proof messages.
func (vs *Server) SubmitContributionAndProofs(ctx context.Context, req *ethpbv2.SubmitContributionAndProofsRequest) (*empty.Empty, error) {
ctx, span := trace.StartSpan(ctx, "validator.SubmitContributionAndProofs")
defer span.End()
for _, item := range req.Data {
v1alpha1Req := &ethpbalpha.SignedContributionAndProof{
Message: &ethpbalpha.ContributionAndProof{
AggregatorIndex: item.Message.AggregatorIndex,
Contribution: &ethpbalpha.SyncCommitteeContribution{
Slot: item.Message.Contribution.Slot,
BlockRoot: item.Message.Contribution.BeaconBlockRoot,
SubcommitteeIndex: item.Message.Contribution.SubcommitteeIndex,
AggregationBits: item.Message.Contribution.AggregationBits,
Signature: item.Message.Contribution.Signature,
},
SelectionProof: item.Message.SelectionProof,
},
Signature: item.Signature,
}
_, err := vs.V1Alpha1Server.SubmitSignedContributionAndProof(ctx, v1alpha1Req)
// We simply return err because it's already a gRPC error type.
if err != nil {
return nil, err
}
}
return &empty.Empty{}, nil
}
// GetLiveness requests the beacon node to indicate if a validator has been observed to be live in a given epoch.
// The beacon node might detect liveness by observing messages from the validator on the network,
// in the beacon chain, from its API or from any other source.
@@ -1216,20 +928,6 @@ func proposalDependentRoot(s state.BeaconState, epoch primitives.Epoch) ([]byte,
return root, nil
}
// Logic based on https://hackmd.io/ofFJ5gOmQpu1jjHilHbdQQ
func v1ValidatorStatusToV1Alpha1(valStatus ethpbv1.ValidatorStatus) ethpbalpha.ValidatorStatus {
switch valStatus {
case ethpbv1.ValidatorStatus_ACTIVE:
return ethpbalpha.ValidatorStatus_ACTIVE
case ethpbv1.ValidatorStatus_PENDING:
return ethpbalpha.ValidatorStatus_PENDING
case ethpbv1.ValidatorStatus_WITHDRAWAL:
return ethpbalpha.ValidatorStatus_EXITED
default:
return ethpbalpha.ValidatorStatus_UNKNOWN_STATUS
}
}
func syncCommitteeDutiesLastValidEpoch(currentEpoch primitives.Epoch) primitives.Epoch {
currentSyncPeriodIndex := currentEpoch / params.BeaconConfig().EpochsPerSyncCommitteePeriod
// Return the last epoch of the next sync committee.

File diff suppressed because it is too large

View File

@@ -17,7 +17,7 @@ go_library(
"//beacon-chain/p2p/peers:go_default_library",
"//beacon-chain/p2p/peers/peerdata:go_default_library",
"//beacon-chain/sync:go_default_library",
"//network:go_default_library",
"//network/http:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_libp2p_go_libp2p//core/network:go_default_library",
"@com_github_libp2p_go_libp2p//core/peer:go_default_library",
@@ -36,7 +36,7 @@ go_test(
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/p2p/peers:go_default_library",
"//beacon-chain/p2p/testing:go_default_library",
"//network:go_default_library",
"//network/http:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",

View File

@@ -12,7 +12,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers/peerdata"
"github.com/prysmaticlabs/prysm/v4/network"
http2 "github.com/prysmaticlabs/prysm/v4/network/http"
eth "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)
@@ -24,11 +24,11 @@ func (s *Server) ListTrustedPeer(w http.ResponseWriter, r *http.Request) {
for _, id := range allIds {
p, err := httpPeerInfo(peerStatus, id)
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: errors.Wrapf(err, "Could not get peer info").Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
// peers added into trusted set but never connected should also be listed
@@ -44,37 +44,37 @@ func (s *Server) ListTrustedPeer(w http.ResponseWriter, r *http.Request) {
allPeers = append(allPeers, p)
}
response := &PeersResponse{Peers: allPeers}
network.WriteJson(w, response)
http2.WriteJson(w, response)
}
// AddTrustedPeer adds a new peer into node's trusted peer set by Multiaddr
func (s *Server) AddTrustedPeer(w http.ResponseWriter, r *http.Request) {
body, err := io.ReadAll(r.Body)
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: errors.Wrapf(err, "Could not read request body").Error(),
Code: http.StatusInternalServerError,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
var addrRequest *AddrRequest
err = json.Unmarshal(body, &addrRequest)
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: errors.Wrapf(err, "Could not decode request body into peer address").Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
info, err := peer.AddrInfoFromString(addrRequest.Addr)
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: errors.Wrapf(err, "Could not derive peer info from multiaddress").Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}
@@ -98,11 +98,11 @@ func (s *Server) RemoveTrustedPeer(w http.ResponseWriter, r *http.Request) {
id := segments[len(segments)-1]
peerId, err := peer.Decode(id)
if err != nil {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: errors.Wrapf(err, "Could not decode peer id").Error(),
Code: http.StatusBadRequest,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
return
}

View File

@@ -17,7 +17,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/peers"
mockp2p "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
"github.com/prysmaticlabs/prysm/v4/network"
http2 "github.com/prysmaticlabs/prysm/v4/network/http"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)
@@ -188,7 +188,7 @@ func TestAddTrustedPeer_EmptyBody(t *testing.T) {
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.AddTrustedPeer(writer, request)
e := &network.DefaultErrorJson{}
e := &http2.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, writer.Code)
assert.Equal(t, "Could not decode request body into peer address: unexpected end of JSON input", e.Message)
@@ -213,7 +213,7 @@ func TestAddTrustedPeer_BadAddress(t *testing.T) {
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.AddTrustedPeer(writer, request)
e := &network.DefaultErrorJson{}
e := &http2.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, writer.Code)
assert.StringContains(t, "Could not derive peer info from multiaddress", e.Message)
@@ -243,7 +243,7 @@ func TestRemoveTrustedPeer_EmptyParameter(t *testing.T) {
writer := httptest.NewRecorder()
writer.Body = &bytes.Buffer{}
s.RemoveTrustedPeer(writer, request)
e := &network.DefaultErrorJson{}
e := &http2.DefaultErrorJson{}
require.NoError(t, json.Unmarshal(writer.Body.Bytes(), e))
assert.Equal(t, http.StatusBadRequest, writer.Code)
assert.Equal(t, "Could not decode peer id: failed to parse peer ID: invalid cid: cid too short", e.Message)

View File

@@ -103,6 +103,7 @@ go_test(
"//beacon-chain/operations/attestations:go_default_library",
"//beacon-chain/operations/slashings:go_default_library",
"//beacon-chain/p2p/testing:go_default_library",
"//beacon-chain/rpc/core:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/state-native:go_default_library",
"//beacon-chain/state/stategen:go_default_library",

View File

@@ -17,6 +17,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/slashings"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
@@ -47,4 +48,5 @@ type Server struct {
SyncChecker sync.Checker
ReplayerBuilder stategen.ReplayerBuilder
OptimisticModeFetcher blockchain.OptimisticModeFetcher
CoreService *core.Service
}

View File

@@ -659,11 +659,7 @@ func (bs *Server) GetValidatorQueue(
func (bs *Server) GetValidatorPerformance(
ctx context.Context, req *ethpb.ValidatorPerformanceRequest,
) (*ethpb.ValidatorPerformanceResponse, error) {
if bs.SyncChecker.Syncing() {
return nil, status.Error(codes.Unavailable, "Syncing to latest head, not ready to respond")
}
currSlot := bs.GenesisTimeFetcher.CurrentSlot()
response, err := core.ComputeValidatorPerformance(ctx, req, bs.HeadFetcher, currSlot)
response, err := bs.CoreService.ComputeValidatorPerformance(ctx, req)
if err != nil {
return nil, status.Errorf(core.ErrorReasonToGRPC(err.Reason), "Could not compute validator performance: %v", err.Err)
}

View File

@@ -19,6 +19,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/db"
dbTest "github.com/prysmaticlabs/prysm/v4/beacon-chain/db/testing"
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
@@ -1797,7 +1798,9 @@ func TestGetValidatorPerformance_Syncing(t *testing.T) {
ctx := context.Background()
bs := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: true},
CoreService: &core.Service{
SyncChecker: &mockSync.Sync{IsSyncing: true},
},
}
wanted := "Syncing to latest head, not ready to respond"
@@ -1857,11 +1860,13 @@ func TestGetValidatorPerformance_OK(t *testing.T) {
require.NoError(t, headState.SetBalances([]uint64{100, 101, 102}))
offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
bs := &Server{
HeadFetcher: &mock.ChainService{
State: headState,
CoreService: &core.Service{
HeadFetcher: &mock.ChainService{
State: headState,
},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
SyncChecker: &mockSync.Sync{IsSyncing: false},
},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
want := &ethpb.ValidatorPerformanceResponse{
PublicKeys: [][]byte{publicKey2[:], publicKey3[:]},
@@ -1918,12 +1923,14 @@ func TestGetValidatorPerformance_Indices(t *testing.T) {
require.NoError(t, headState.SetValidators(validators))
offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
bs := &Server{
HeadFetcher: &mock.ChainService{
// 10 epochs into the future.
State: headState,
CoreService: &core.Service{
HeadFetcher: &mock.ChainService{
// 10 epochs into the future.
State: headState,
},
SyncChecker: &mockSync.Sync{IsSyncing: false},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
},
SyncChecker: &mockSync.Sync{IsSyncing: false},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
}
c := headState.Copy()
vp, bp, err := precompute.New(ctx, c)
@@ -1988,12 +1995,14 @@ func TestGetValidatorPerformance_IndicesPubkeys(t *testing.T) {
offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
bs := &Server{
HeadFetcher: &mock.ChainService{
// 10 epochs into the future.
State: headState,
CoreService: &core.Service{
HeadFetcher: &mock.ChainService{
// 10 epochs into the future.
State: headState,
},
SyncChecker: &mockSync.Sync{IsSyncing: false},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
},
SyncChecker: &mockSync.Sync{IsSyncing: false},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
}
c := headState.Copy()
vp, bp, err := precompute.New(ctx, c)
@@ -2064,11 +2073,13 @@ func TestGetValidatorPerformanceAltair_OK(t *testing.T) {
require.NoError(t, headState.SetBalances([]uint64{100, 101, 102}))
offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
bs := &Server{
HeadFetcher: &mock.ChainService{
State: headState,
CoreService: &core.Service{
HeadFetcher: &mock.ChainService{
State: headState,
},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
SyncChecker: &mockSync.Sync{IsSyncing: false},
},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
want := &ethpb.ValidatorPerformanceResponse{
PublicKeys: [][]byte{publicKey2[:], publicKey3[:]},
@@ -2132,11 +2143,13 @@ func TestGetValidatorPerformanceBellatrix_OK(t *testing.T) {
require.NoError(t, headState.SetBalances([]uint64{100, 101, 102}))
offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
bs := &Server{
HeadFetcher: &mock.ChainService{
State: headState,
CoreService: &core.Service{
HeadFetcher: &mock.ChainService{
State: headState,
},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
SyncChecker: &mockSync.Sync{IsSyncing: false},
},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
want := &ethpb.ValidatorPerformanceResponse{
PublicKeys: [][]byte{publicKey2[:], publicKey3[:]},
@@ -2200,11 +2213,13 @@ func TestGetValidatorPerformanceCapella_OK(t *testing.T) {
require.NoError(t, headState.SetBalances([]uint64{100, 101, 102}))
offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
bs := &Server{
HeadFetcher: &mock.ChainService{
State: headState,
CoreService: &core.Service{
HeadFetcher: &mock.ChainService{
State: headState,
},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
SyncChecker: &mockSync.Sync{IsSyncing: false},
},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
want := &ethpb.ValidatorPerformanceResponse{
PublicKeys: [][]byte{publicKey2[:], publicKey3[:]},

View File

@@ -56,6 +56,7 @@ go_library(
"//beacon-chain/operations/synccommittee:go_default_library",
"//beacon-chain/operations/voluntaryexits:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/rpc/core:go_default_library",
"//beacon-chain/startup:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/state/stategen:go_default_library",

View File

@@ -1,16 +1,14 @@
package validator
import (
"bytes"
"context"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"github.com/sirupsen/logrus"
"go.opencensus.io/trace"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
@@ -115,32 +113,8 @@ func (vs *Server) SubmitSignedAggregateSelectionProof(
ctx context.Context,
req *ethpb.SignedAggregateSubmitRequest,
) (*ethpb.SignedAggregateSubmitResponse, error) {
if req.SignedAggregateAndProof == nil || req.SignedAggregateAndProof.Message == nil ||
req.SignedAggregateAndProof.Message.Aggregate == nil || req.SignedAggregateAndProof.Message.Aggregate.Data == nil {
return nil, status.Error(codes.InvalidArgument, "Signed aggregate request can't be nil")
if err := vs.CoreService.SubmitSignedAggregateSelectionProof(ctx, req); err != nil {
return nil, status.Errorf(core.ErrorReasonToGRPC(err.Reason), "Could not submit aggregate: %v", err.Err)
}
emptySig := make([]byte, fieldparams.BLSSignatureLength)
if bytes.Equal(req.SignedAggregateAndProof.Signature, emptySig) ||
bytes.Equal(req.SignedAggregateAndProof.Message.SelectionProof, emptySig) {
return nil, status.Error(codes.InvalidArgument, "Signed signatures can't be zero hashes")
}
// As a preventive measure, a beacon node shouldn't broadcast an attestation whose slot is out of range.
if err := helpers.ValidateAttestationTime(req.SignedAggregateAndProof.Message.Aggregate.Data.Slot,
vs.TimeFetcher.GenesisTime(), params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
return nil, status.Error(codes.InvalidArgument, "Attestation slot is no longer valid from current time")
}
if err := vs.P2P.Broadcast(ctx, req.SignedAggregateAndProof); err != nil {
return nil, status.Errorf(codes.Internal, "Could not broadcast signed aggregated attestation: %v", err)
}
log.WithFields(logrus.Fields{
"slot": req.SignedAggregateAndProof.Message.Aggregate.Data.Slot,
"committeeIndex": req.SignedAggregateAndProof.Message.Aggregate.Data.CommitteeIndex,
"validatorIndex": req.SignedAggregateAndProof.Message.AggregatorIndex,
"aggregatedCount": req.SignedAggregateAndProof.Message.Aggregate.AggregationBits.Count(),
}).Debug("Broadcasting aggregated attestation and proof")
return &ethpb.SignedAggregateSubmitResponse{}, nil
}
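
The point of delegating to CoreService is that one error value, tagged with a transport-agnostic reason, can be rendered as a gRPC status here and as an HTTP status in the handlers above. A hedged sketch of that pattern; the reason names and mappings below are hypothetical, since the core package's actual enum is not shown in this diff:

```go
package main

import (
	"fmt"
	"net/http"

	"google.golang.org/grpc/codes"
)

// errorReason is a hypothetical stand-in for the core package's error reason enum.
type errorReason int

const (
	internal errorReason = iota
	badRequest
	unavailable
)

// rpcError pairs the underlying error with a transport-agnostic reason,
// mirroring how both the gRPC and HTTP handlers consume CoreService results.
type rpcError struct {
	Err    error
	Reason errorReason
}

func reasonToGRPC(r errorReason) codes.Code {
	switch r {
	case badRequest:
		return codes.InvalidArgument
	case unavailable:
		return codes.Unavailable
	default:
		return codes.Internal
	}
}

func reasonToHTTP(r errorReason) int {
	switch r {
	case badRequest:
		return http.StatusBadRequest
	case unavailable:
		return http.StatusServiceUnavailable
	default:
		return http.StatusInternalServerError
	}
}

func main() {
	e := &rpcError{Err: fmt.Errorf("attestation slot is no longer valid from current time"), Reason: badRequest}
	fmt.Println(reasonToGRPC(e.Reason), reasonToHTTP(e.Reason)) // InvalidArgument 400
}
```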

View File

@@ -12,6 +12,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/signing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
mockp2p "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing"
@@ -398,7 +399,9 @@ func TestSubmitAggregateAndProof_SelectsMostBitsWhenOwnAttestationNotPresent(t *
}
func TestSubmitSignedAggregateSelectionProof_ZeroHashesSignatures(t *testing.T) {
aggregatorServer := &Server{}
aggregatorServer := &Server{
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
}
req := &ethpb.SignedAggregateSubmitRequest{
SignedAggregateAndProof: &ethpb.SignedAggregateAttestationAndProof{
Signature: make([]byte, fieldparams.BLSSignatureLength),
@@ -410,7 +413,7 @@ func TestSubmitSignedAggregateSelectionProof_ZeroHashesSignatures(t *testing.T)
},
}
_, err := aggregatorServer.SubmitSignedAggregateSelectionProof(context.Background(), req)
require.ErrorContains(t, "Signed signatures can't be zero hashes", err)
require.ErrorContains(t, "signed signatures can't be zero hashes", err)
req = &ethpb.SignedAggregateSubmitRequest{
SignedAggregateAndProof: &ethpb.SignedAggregateAttestationAndProof{
@@ -424,12 +427,16 @@ func TestSubmitSignedAggregateSelectionProof_ZeroHashesSignatures(t *testing.T)
},
}
_, err = aggregatorServer.SubmitSignedAggregateSelectionProof(context.Background(), req)
require.ErrorContains(t, "Signed signatures can't be zero hashes", err)
require.ErrorContains(t, "signed signatures can't be zero hashes", err)
}
func TestSubmitSignedAggregateSelectionProof_InvalidSlot(t *testing.T) {
c := &mock.ChainService{Genesis: time.Now()}
aggregatorServer := &Server{TimeFetcher: c}
aggregatorServer := &Server{
CoreService: &core.Service{
GenesisTimeFetcher: c,
},
}
req := &ethpb.SignedAggregateSubmitRequest{
SignedAggregateAndProof: &ethpb.SignedAggregateAttestationAndProof{
Signature: []byte{'a'},
@@ -442,5 +449,5 @@ func TestSubmitSignedAggregateSelectionProof_InvalidSlot(t *testing.T) {
},
}
_, err := aggregatorServer.SubmitSignedAggregateSelectionProof(context.Background(), req)
require.ErrorContains(t, "Attestation slot is no longer valid from current time", err)
require.ErrorContains(t, "attestation slot is no longer valid from current time", err)
}

View File

@@ -11,10 +11,10 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
coreTime "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
beaconState "github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/crypto/rand"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpbv1 "github.com/prysmaticlabs/prysm/v4/proto/eth/v1"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
@@ -234,8 +234,8 @@ func (vs *Server) duties(ctx context.Context, req *ethpb.DutiesRequest) (*ethpb.
validatorAssignments = append(validatorAssignments, assignment)
nextValidatorAssignments = append(nextValidatorAssignments, nextAssignment)
// Assign relevant validator to subnet.
assignValidatorToSubnet(pubKey, assignment.Status)
assignValidatorToSubnet(pubKey, nextAssignment.Status)
core.AssignValidatorToSubnetProto(pubKey, assignment.Status)
core.AssignValidatorToSubnetProto(pubKey, nextAssignment.Status)
}
return &ethpb.DutiesResponse{
@@ -248,34 +248,10 @@ func (vs *Server) duties(ctx context.Context, req *ethpb.DutiesRequest) (*ethpb.
// AssignValidatorToSubnet checks the status and pubkey of a particular validator
// to discern whether persistent subnets need to be registered for them.
func (vs *Server) AssignValidatorToSubnet(_ context.Context, req *ethpb.AssignValidatorToSubnetRequest) (*emptypb.Empty, error) {
assignValidatorToSubnet(req.PublicKey, req.Status)
core.AssignValidatorToSubnetProto(req.PublicKey, req.Status)
return &emptypb.Empty{}, nil
}
func assignValidatorToSubnet(pubkey []byte, status ethpb.ValidatorStatus) {
if status != ethpb.ValidatorStatus_ACTIVE && status != ethpb.ValidatorStatus_EXITING {
return
}
_, ok, expTime := cache.SubnetIDs.GetPersistentSubnets(pubkey)
if ok && expTime.After(prysmTime.Now()) {
return
}
epochDuration := time.Duration(params.BeaconConfig().SlotsPerEpoch.Mul(params.BeaconConfig().SecondsPerSlot))
var assignedIdxs []uint64
randGen := rand.NewGenerator()
for i := uint64(0); i < params.BeaconConfig().RandomSubnetsPerValidator; i++ {
assignedIdx := randGen.Intn(int(params.BeaconNetworkConfig().AttestationSubnetCount))
assignedIdxs = append(assignedIdxs, uint64(assignedIdx))
}
assignedDuration := uint64(randGen.Intn(int(params.BeaconConfig().EpochsPerRandomSubnetSubscription)))
assignedDuration += params.BeaconConfig().EpochsPerRandomSubnetSubscription
totalDuration := epochDuration * time.Duration(assignedDuration)
cache.SubnetIDs.AddPersistentCommittee(pubkey, assignedIdxs, totalDuration*time.Second)
}
func registerSyncSubnetCurrentPeriod(s beaconState.BeaconState, epoch primitives.Epoch, pubKey []byte, status ethpb.ValidatorStatus) error {
committee, err := s.CurrentSyncCommittee()
if err != nil {

View File
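For reference, a self-contained sketch of the subscription-length arithmetic in the removed assignValidatorToSubnet helper above, which this diff relocates into the core package as AssignValidatorToSubnetProto. The parameter values are assumptions chosen to resemble mainnet (32 slots per epoch, 12 seconds per slot, EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION = 256) and are not taken from this diff.

```go
// Sketch only: mirrors the duration math of the removed helper under assumed parameters.
package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	const (
		slotsPerEpoch                     = 32  // assumption
		secondsPerSlot                    = 12  // assumption
		epochsPerRandomSubnetSubscription = 256 // assumption
	)
	// As in the original, epochDuration holds a raw seconds count and only becomes a
	// real duration through the final multiplication by time.Second.
	epochDuration := time.Duration(slotsPerEpoch * secondsPerSlot)
	// Random extra epochs in [0, 256) on top of the fixed 256-epoch floor.
	assignedEpochs := uint64(rand.Intn(epochsPerRandomSubnetSubscription)) + epochsPerRandomSubnetSubscription
	total := epochDuration * time.Duration(assignedEpochs) * time.Second
	fmt.Printf("persistent subnet subscription: %v (%d epochs)\n", total, assignedEpochs)
}
```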

@@ -17,6 +17,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
mockExecution "github.com/prysmaticlabs/prysm/v4/beacon-chain/execution/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
@@ -606,7 +607,7 @@ func TestStreamDuties_OK_ChainReorg(t *testing.T) {
func TestAssignValidatorToSubnet(t *testing.T) {
k := pubKey(3)
assignValidatorToSubnet(k, ethpb.ValidatorStatus_ACTIVE)
core.AssignValidatorToSubnetProto(k, ethpb.ValidatorStatus_ACTIVE)
coms, ok, exp := cache.SubnetIDs.GetPersistentSubnets(k)
require.Equal(t, true, ok, "No cache entry found for validator")
assert.Equal(t, params.BeaconConfig().RandomSubnetsPerValidator, uint64(len(coms)))

View File

@@ -2,19 +2,14 @@ package validator
import (
"context"
"errors"
"fmt"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/operation"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/time"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/time/slots"
"go.opencensus.io/trace"
@@ -42,102 +37,9 @@ func (vs *Server) GetAttestationData(ctx context.Context, req *ethpb.Attestation
return nil, err
}
if err := helpers.ValidateAttestationTime(req.Slot, vs.TimeFetcher.GenesisTime(),
params.BeaconNetworkConfig().MaximumGossipClockDisparity); err != nil {
return nil, status.Error(codes.InvalidArgument, fmt.Sprintf("invalid request: %v", err))
}
res, err := vs.AttestationCache.Get(ctx, req)
res, err := vs.CoreService.GetAttestationData(ctx, req)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not retrieve data from attestation cache: %v", err)
}
if res != nil {
res.CommitteeIndex = req.CommitteeIndex
return res, nil
}
if err := vs.AttestationCache.MarkInProgress(req); err != nil {
if errors.Is(err, cache.ErrAlreadyInProgress) {
res, err := vs.AttestationCache.Get(ctx, req)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not retrieve data from attestation cache: %v", err)
}
if res == nil {
return nil, status.Error(codes.DataLoss, "A request was in progress and resolved to nil")
}
res.CommitteeIndex = req.CommitteeIndex
return res, nil
}
return nil, status.Errorf(codes.Internal, "Could not mark attestation as in-progress: %v", err)
}
defer func() {
if err := vs.AttestationCache.MarkNotInProgress(req); err != nil {
log.WithError(err).Error("Could not mark cache not in progress")
}
}()
headState, err := vs.HeadFetcher.HeadState(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not retrieve head state: %v", err)
}
headRoot, err := vs.HeadFetcher.HeadRoot(ctx)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not retrieve head root: %v", err)
}
// In the case that we receive an attestation request after a newer state/block has been processed.
if headState.Slot() > req.Slot {
headRoot, err = helpers.BlockRootAtSlot(headState, req.Slot)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get historical head root: %v", err)
}
headState, err = vs.StateGen.StateByRoot(ctx, bytesutil.ToBytes32(headRoot))
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get historical head state: %v", err)
}
}
if headState == nil || headState.IsNil() {
return nil, status.Error(codes.Internal, "Could not lookup parent state from head.")
}
if time.CurrentEpoch(headState) < slots.ToEpoch(req.Slot) {
headState, err = transition.ProcessSlotsUsingNextSlotCache(ctx, headState, headRoot, req.Slot)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not process slots up to %d: %v", req.Slot, err)
}
}
targetEpoch := time.CurrentEpoch(headState)
epochStartSlot, err := slots.EpochStart(targetEpoch)
if err != nil {
return nil, err
}
var targetRoot []byte
if epochStartSlot == headState.Slot() {
targetRoot = headRoot
} else {
targetRoot, err = helpers.BlockRootAtSlot(headState, epochStartSlot)
if err != nil {
return nil, status.Errorf(codes.Internal, "Could not get target block for slot %d: %v", epochStartSlot, err)
}
if bytesutil.ToBytes32(targetRoot) == params.BeaconConfig().ZeroHash {
targetRoot = headRoot
}
}
res = &ethpb.AttestationData{
Slot: req.Slot,
CommitteeIndex: req.CommitteeIndex,
BeaconBlockRoot: headRoot,
Source: headState.CurrentJustifiedCheckpoint(),
Target: &ethpb.Checkpoint{
Epoch: targetEpoch,
Root: targetRoot,
},
}
if err := vs.AttestationCache.Put(ctx, req, res); err != nil {
return nil, status.Errorf(codes.Internal, "Could not store attestation data in cache: %v", err)
return nil, status.Errorf(core.ErrorReasonToGRPC(err.Reason), "Could not get attestation data: %v", err.Err)
}
return res, nil
}

View File
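The hunk above swaps the inline attestation-data computation for a single call to vs.CoreService.GetAttestationData and converts the returned typed error through core.ErrorReasonToGRPC(err.Reason). A hedged sketch of that reason-to-status mapping pattern follows; the reason constants and exact struct layout are illustrative assumptions, since only the Reason and Err fields and the ErrorReasonToGRPC / ErrorReasonToHTTP helpers are visible in this diff.

```go
// Sketch of a shared error type whose Reason is mapped per transport (gRPC and HTTP).
package main

import (
	"fmt"
	"net/http"

	"google.golang.org/grpc/codes"
)

type ErrorReason int

const (
	Internal ErrorReason = iota // hypothetical reason values for illustration
	BadRequest
	Unavailable
)

// RpcError carries the underlying error plus a machine-readable reason.
type RpcError struct {
	Err    error
	Reason ErrorReason
}

func ErrorReasonToGRPC(r ErrorReason) codes.Code {
	switch r {
	case BadRequest:
		return codes.InvalidArgument
	case Unavailable:
		return codes.Unavailable
	default:
		return codes.Internal
	}
}

func ErrorReasonToHTTP(r ErrorReason) int {
	switch r {
	case BadRequest:
		return http.StatusBadRequest
	case Unavailable:
		return http.StatusServiceUnavailable
	default:
		return http.StatusInternalServerError
	}
}

func main() {
	rpcErr := &RpcError{Err: fmt.Errorf("head state unavailable"), Reason: Unavailable}
	fmt.Println(ErrorReasonToGRPC(rpcErr.Reason), ErrorReasonToHTTP(rpcErr.Reason))
}
```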

@@ -7,7 +7,7 @@ import (
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/cache"
mockp2p "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
@@ -65,20 +65,16 @@ func TestAttestationDataAtSlot_HandlesFarAwayJustifiedEpoch(t *testing.T) {
blockRoots[1*params.BeaconConfig().SlotsPerEpoch] = epochBoundaryRoot[:]
blockRoots[2*params.BeaconConfig().SlotsPerEpoch] = justifiedBlockRoot[:]
require.NoError(t, beaconState.SetBlockRoots(blockRoots))
chainService := &mock.ChainService{
Genesis: time.Now(),
}
offset := int64(slot.Mul(params.BeaconConfig().SecondsPerSlot))
attesterServer := &Server{
P2P: &mockp2p.MockBroadcaster{},
AttestationCache: cache.NewAttestationCache(),
HeadFetcher: &mock.ChainService{State: beaconState, Root: blockRoot[:]},
FinalizationFetcher: &mock.ChainService{
CurrentJustifiedCheckPoint: beaconState.CurrentJustifiedCheckpoint(),
SyncChecker: &mockSync.Sync{IsSyncing: false},
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
TimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
CoreService: &core.Service{
AttestationCache: cache.NewAttestationCache(),
HeadFetcher: &mock.ChainService{State: beaconState, Root: blockRoot[:]},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
},
SyncChecker: &mockSync.Sync{IsSyncing: false},
TimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
StateNotifier: chainService.StateNotifier(),
}
req := &ethpb.AttestationDataRequest{

View File

@@ -13,6 +13,7 @@ import (
doublylinkedtree "github.com/prysmaticlabs/prysm/v4/beacon-chain/forkchoice/doubly-linked-tree"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/attestations"
mockp2p "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
state_native "github.com/prysmaticlabs/prysm/v4/beacon-chain/state/state-native"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing"
@@ -34,7 +35,6 @@ func TestProposeAttestation_OK(t *testing.T) {
attesterServer := &Server{
HeadFetcher: &mock.ChainService{},
P2P: &mockp2p.MockBroadcaster{},
AttestationCache: cache.NewAttestationCache(),
AttPool: attestations.NewPool(),
OperationNotifier: (&mock.ChainService{}).OperationNotifier(),
}
@@ -78,7 +78,6 @@ func TestProposeAttestation_IncorrectSignature(t *testing.T) {
attesterServer := &Server{
HeadFetcher: &mock.ChainService{},
P2P: &mockp2p.MockBroadcaster{},
AttestationCache: cache.NewAttestationCache(),
AttPool: attestations.NewPool(),
OperationNotifier: (&mock.ChainService{}).OperationNotifier(),
}
@@ -117,24 +116,20 @@ func TestGetAttestationData_OK(t *testing.T) {
blockRoots[1*params.BeaconConfig().SlotsPerEpoch] = targetRoot[:]
blockRoots[2*params.BeaconConfig().SlotsPerEpoch] = justifiedRoot[:]
require.NoError(t, beaconState.SetBlockRoots(blockRoots))
chainService := &mock.ChainService{
Genesis: time.Now(),
}
offset := int64(slot.Mul(params.BeaconConfig().SecondsPerSlot))
attesterServer := &Server{
P2P: &mockp2p.MockBroadcaster{},
SyncChecker: &mockSync.Sync{IsSyncing: false},
AttestationCache: cache.NewAttestationCache(),
HeadFetcher: &mock.ChainService{
State: beaconState, Root: blockRoot[:],
SyncChecker: &mockSync.Sync{IsSyncing: false},
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
TimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
CoreService: &core.Service{
AttestationCache: cache.NewAttestationCache(),
HeadFetcher: &mock.ChainService{
State: beaconState, Root: blockRoot[:],
},
GenesisTimeFetcher: &mock.ChainService{
Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second),
},
},
FinalizationFetcher: &mock.ChainService{
CurrentJustifiedCheckPoint: beaconState.CurrentJustifiedCheckpoint(),
},
TimeFetcher: &mock.ChainService{
Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second),
},
StateNotifier: chainService.StateNotifier(),
}
req := &ethpb.AttestationDataRequest{
@@ -163,7 +158,7 @@ func TestGetAttestationData_OK(t *testing.T) {
}
func TestGetAttestationData_SyncNotReady(t *testing.T) {
as := &Server{
as := Server{
SyncChecker: &mockSync.Sync{IsSyncing: true},
}
_, err := as.GetAttestationData(context.Background(), &ethpb.AttestationDataRequest{})
@@ -178,9 +173,12 @@ func TestGetAttestationData_Optimistic(t *testing.T) {
as := &Server{
SyncChecker: &mockSync.Sync{},
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
HeadFetcher: &mock.ChainService{},
OptimisticModeFetcher: &mock.ChainService{Optimistic: true},
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
CoreService: &core.Service{
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now()},
HeadFetcher: &mock.ChainService{},
},
}
_, err := as.GetAttestationData(context.Background(), &ethpb.AttestationDataRequest{})
s, ok := status.FromError(err)
@@ -192,10 +190,13 @@ func TestGetAttestationData_Optimistic(t *testing.T) {
require.NoError(t, err)
as = &Server{
SyncChecker: &mockSync.Sync{},
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
HeadFetcher: &mock.ChainService{Optimistic: false, State: beaconState},
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
AttestationCache: cache.NewAttestationCache(),
TimeFetcher: &mock.ChainService{Genesis: time.Now()},
CoreService: &core.Service{
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now()},
HeadFetcher: &mock.ChainService{Optimistic: false, State: beaconState},
AttestationCache: cache.NewAttestationCache(),
},
}
_, err = as.GetAttestationData(context.Background(), &ethpb.AttestationDataRequest{})
require.NoError(t, err)
@@ -206,17 +207,17 @@ func TestAttestationDataSlot_handlesInProgressRequest(t *testing.T) {
state, err := state_native.InitializeFromProtoPhase0(s)
require.NoError(t, err)
ctx := context.Background()
chainService := &mock.ChainService{
Genesis: time.Now(),
}
slot := primitives.Slot(2)
offset := int64(slot.Mul(params.BeaconConfig().SecondsPerSlot))
server := &Server{
HeadFetcher: &mock.ChainService{State: state},
AttestationCache: cache.NewAttestationCache(),
SyncChecker: &mockSync.Sync{IsSyncing: false},
TimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
StateNotifier: chainService.StateNotifier(),
SyncChecker: &mockSync.Sync{IsSyncing: false},
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
TimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
CoreService: &core.Service{
AttestationCache: cache.NewAttestationCache(),
HeadFetcher: &mock.ChainService{State: state},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
},
}
req := &ethpb.AttestationDataRequest{
@@ -229,7 +230,7 @@ func TestAttestationDataSlot_handlesInProgressRequest(t *testing.T) {
Target: &ethpb.Checkpoint{Epoch: 55, Root: make([]byte, 32)},
}
require.NoError(t, server.AttestationCache.MarkInProgress(req))
require.NoError(t, server.CoreService.AttestationCache.MarkInProgress(req))
var wg sync.WaitGroup
@@ -247,8 +248,8 @@ func TestAttestationDataSlot_handlesInProgressRequest(t *testing.T) {
go func() {
defer wg.Done()
assert.NoError(t, server.AttestationCache.Put(ctx, req, res))
assert.NoError(t, server.AttestationCache.MarkNotInProgress(req))
assert.NoError(t, server.CoreService.AttestationCache.Put(ctx, req, res))
assert.NoError(t, server.CoreService.AttestationCache.MarkNotInProgress(req))
}()
wg.Wait()
@@ -260,9 +261,12 @@ func TestServer_GetAttestationData_InvalidRequestSlot(t *testing.T) {
slot := 3*params.BeaconConfig().SlotsPerEpoch + 1
offset := int64(slot.Mul(params.BeaconConfig().SecondsPerSlot))
attesterServer := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: false},
HeadFetcher: &mock.ChainService{},
TimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
SyncChecker: &mockSync.Sync{IsSyncing: false},
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
TimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
CoreService: &core.Service{
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
},
}
req := &ethpb.AttestationDataRequest{
@@ -323,19 +327,17 @@ func TestServer_GetAttestationData_HeadStateSlotGreaterThanRequestSlot(t *testin
beaconstate := beaconState.Copy()
require.NoError(t, beaconstate.SetSlot(beaconstate.Slot()-1))
require.NoError(t, db.SaveState(ctx, beaconstate, blockRoot2))
chainService := &mock.ChainService{
Genesis: time.Now(),
}
offset = int64(slot.Mul(params.BeaconConfig().SecondsPerSlot))
attesterServer := &Server{
P2P: &mockp2p.MockBroadcaster{},
SyncChecker: &mockSync.Sync{IsSyncing: false},
AttestationCache: cache.NewAttestationCache(),
HeadFetcher: &mock.ChainService{State: beaconState, Root: blockRoot[:]},
FinalizationFetcher: &mock.ChainService{CurrentJustifiedCheckPoint: beaconState.CurrentJustifiedCheckpoint()},
TimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
StateNotifier: chainService.StateNotifier(),
StateGen: stategen.New(db, doublylinkedtree.New()),
SyncChecker: &mockSync.Sync{IsSyncing: false},
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
TimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
CoreService: &core.Service{
AttestationCache: cache.NewAttestationCache(),
HeadFetcher: &mock.ChainService{State: beaconState, Root: blockRoot[:]},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
StateGen: stategen.New(db, doublylinkedtree.New()),
},
}
require.NoError(t, db.SaveState(ctx, beaconState, blockRoot))
util.SaveBlock(t, ctx, db, block)
@@ -394,22 +396,18 @@ func TestGetAttestationData_SucceedsInFirstEpoch(t *testing.T) {
blockRoots[1*params.BeaconConfig().SlotsPerEpoch] = targetRoot[:]
blockRoots[2*params.BeaconConfig().SlotsPerEpoch] = justifiedRoot[:]
require.NoError(t, beaconState.SetBlockRoots(blockRoots))
chainService := &mock.ChainService{
Genesis: time.Now(),
}
offset := int64(slot.Mul(params.BeaconConfig().SecondsPerSlot))
attesterServer := &Server{
P2P: &mockp2p.MockBroadcaster{},
SyncChecker: &mockSync.Sync{IsSyncing: false},
AttestationCache: cache.NewAttestationCache(),
HeadFetcher: &mock.ChainService{
State: beaconState, Root: blockRoot[:],
SyncChecker: &mockSync.Sync{IsSyncing: false},
OptimisticModeFetcher: &mock.ChainService{Optimistic: false},
TimeFetcher: &mock.ChainService{Genesis: prysmTime.Now().Add(time.Duration(-1*offset) * time.Second)},
CoreService: &core.Service{
AttestationCache: cache.NewAttestationCache(),
HeadFetcher: &mock.ChainService{
State: beaconState, Root: blockRoot[:],
},
GenesisTimeFetcher: &mock.ChainService{Genesis: prysmTime.Now().Add(time.Duration(-1*offset) * time.Second)},
},
FinalizationFetcher: &mock.ChainService{
CurrentJustifiedCheckPoint: beaconState.CurrentJustifiedCheckpoint(),
},
TimeFetcher: &mock.ChainService{Genesis: prysmTime.Now().Add(time.Duration(-1*offset) * time.Second)},
StateNotifier: chainService.StateNotifier(),
}
req := &ethpb.AttestationDataRequest{
@@ -441,7 +439,6 @@ func TestServer_SubscribeCommitteeSubnets_NoSlots(t *testing.T) {
attesterServer := &Server{
HeadFetcher: &mock.ChainService{},
P2P: &mockp2p.MockBroadcaster{},
AttestationCache: cache.NewAttestationCache(),
AttPool: attestations.NewPool(),
OperationNotifier: (&mock.ChainService{}).OperationNotifier(),
}
@@ -462,7 +459,6 @@ func TestServer_SubscribeCommitteeSubnets_DifferentLengthSlots(t *testing.T) {
attesterServer := &Server{
HeadFetcher: &mock.ChainService{},
P2P: &mockp2p.MockBroadcaster{},
AttestationCache: cache.NewAttestationCache(),
AttPool: attestations.NewPool(),
OperationNotifier: (&mock.ChainService{}).OperationNotifier(),
}
@@ -508,7 +504,6 @@ func TestServer_SubscribeCommitteeSubnets_MultipleSlots(t *testing.T) {
attesterServer := &Server{
HeadFetcher: &mock.ChainService{State: state},
P2P: &mockp2p.MockBroadcaster{},
AttestationCache: cache.NewAttestationCache(),
AttPool: attestations.NewPool(),
OperationNotifier: (&mock.ChainService{}).OperationNotifier(),
}

View File

@@ -23,6 +23,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/synccommittee"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/startup"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state/stategen"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync"
@@ -41,7 +42,6 @@ import (
// and more.
type Server struct {
Ctx context.Context
AttestationCache *cache.AttestationCache
ProposerSlotIndexCache *cache.ProposerPayloadIDsCache
HeadFetcher blockchain.HeadFetcher
ForkFetcher blockchain.ForkFetcher
@@ -74,6 +74,7 @@ type Server struct {
BlockBuilder builder.BlockBuilder
BLSChangesPool blstoexec.PoolManager
ClockWaiter startup.ClockWaiter
CoreService *core.Service
}
// WaitForActivation checks if a validator public key exists in the active validator registry of the current

View File

@@ -5,8 +5,7 @@ import (
"context"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed"
opfeed "github.com/prysmaticlabs/prysm/v4/beacon-chain/core/feed/operation"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/crypto/bls"
@@ -121,30 +120,11 @@ func (vs *Server) GetSyncCommitteeContribution(
func (vs *Server) SubmitSignedContributionAndProof(
ctx context.Context, s *ethpb.SignedContributionAndProof,
) (*emptypb.Empty, error) {
errs, ctx := errgroup.WithContext(ctx)
// Broadcast and save the contribution into the pool in parallel, as one failing should not affect the other.
errs.Go(func() error {
return vs.P2P.Broadcast(ctx, s)
})
if err := vs.SyncCommitteePool.SaveSyncCommitteeContribution(s.Message.Contribution); err != nil {
return nil, err
err := vs.CoreService.SubmitSignedContributionAndProof(ctx, s)
if err != nil {
return &emptypb.Empty{}, status.Errorf(core.ErrorReasonToGRPC(err.Reason), err.Err.Error())
}
// Wait for p2p broadcast to complete and return the first error (if any)
err := errs.Wait()
if err == nil {
vs.OperationNotifier.OperationFeed().Send(&feed.Event{
Type: opfeed.SyncCommitteeContributionReceived,
Data: &opfeed.SyncCommitteeContributionReceivedData{
Contribution: s,
},
})
}
return &emptypb.Empty{}, err
return &emptypb.Empty{}, nil
}
// AggregatedSigAndAggregationBits returns the aggregated signature and aggregation bits

View File
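The removed body above shows what CoreService.SubmitSignedContributionAndProof now does on the handler's behalf: broadcast the contribution over p2p inside an errgroup, save it to the sync committee pool without waiting on the broadcast, and send an operation-feed event only when both succeed. A minimal sketch of that flow, using stand-in interfaces rather than Prysm types:

```go
// Sketch: parallel broadcast-and-save with a feed notification on success.
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

type Broadcaster interface{ Broadcast(ctx context.Context, msg any) error }
type Pool interface{ Save(msg any) error }
type Notifier interface{ Send(msg any) }

func submit(ctx context.Context, b Broadcaster, p Pool, n Notifier, msg any) error {
	g, ctx := errgroup.WithContext(ctx)
	g.Go(func() error { return b.Broadcast(ctx, msg) }) // broadcast in parallel
	if err := p.Save(msg); err != nil {                  // saving does not wait on p2p
		return err
	}
	if err := g.Wait(); err != nil { // surface the broadcast error, if any
		return err
	}
	n.Send(msg) // notify subscribers only after both operations succeed
	return nil
}

type noop struct{}

func (noop) Broadcast(context.Context, any) error { return nil }
func (noop) Save(any) error                       { return nil }
func (noop) Send(any)                             { fmt.Println("operation feed event sent") }

func main() {
	_ = submit(context.Background(), noop{}, noop{}, noop{}, "contribution")
}
```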

@@ -11,6 +11,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/transition"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/synccommittee"
mockp2p "github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/config/params"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
@@ -137,9 +138,11 @@ func TestGetSyncCommitteeContribution_FiltersDuplicates(t *testing.T) {
func TestSubmitSignedContributionAndProof_OK(t *testing.T) {
server := &Server{
SyncCommitteePool: synccommittee.NewStore(),
P2P: &mockp2p.MockBroadcaster{},
OperationNotifier: (&mock.ChainService{}).OperationNotifier(),
CoreService: &core.Service{
SyncCommitteePool: synccommittee.NewStore(),
Broadcaster: &mockp2p.MockBroadcaster{},
OperationNotifier: (&mock.ChainService{}).OperationNotifier(),
},
}
contribution := &ethpb.SignedContributionAndProof{
Message: &ethpb.ContributionAndProof{
@@ -151,21 +154,23 @@ func TestSubmitSignedContributionAndProof_OK(t *testing.T) {
}
_, err := server.SubmitSignedContributionAndProof(context.Background(), contribution)
require.NoError(t, err)
savedMsgs, err := server.SyncCommitteePool.SyncCommitteeContributions(1)
savedMsgs, err := server.CoreService.SyncCommitteePool.SyncCommitteeContributions(1)
require.NoError(t, err)
require.DeepEqual(t, []*ethpb.SyncCommitteeContribution{contribution.Message.Contribution}, savedMsgs)
}
func TestSubmitSignedContributionAndProof_Notification(t *testing.T) {
server := &Server{
SyncCommitteePool: synccommittee.NewStore(),
P2P: &mockp2p.MockBroadcaster{},
OperationNotifier: (&mock.ChainService{}).OperationNotifier(),
CoreService: &core.Service{
SyncCommitteePool: synccommittee.NewStore(),
Broadcaster: &mockp2p.MockBroadcaster{},
OperationNotifier: (&mock.ChainService{}).OperationNotifier(),
},
}
// Subscribe to operation notifications.
opChannel := make(chan *feed.Event, 1024)
opSub := server.OperationNotifier.OperationFeed().Subscribe(opChannel)
opSub := server.CoreService.OperationNotifier.OperationFeed().Subscribe(opChannel)
defer opSub.Unsubscribe()
contribution := &ethpb.SignedContributionAndProof{

View File

@@ -13,7 +13,7 @@ go_library(
"//beacon-chain/rpc/core:go_default_library",
"//beacon-chain/sync:go_default_library",
"//consensus-types/primitives:go_default_library",
"//network:go_default_library",
"//network/http:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
],
)
@@ -26,6 +26,7 @@ go_test(
"//beacon-chain/blockchain/testing:go_default_library",
"//beacon-chain/core/epoch/precompute:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/rpc/core:go_default_library",
"//beacon-chain/state:go_default_library",
"//beacon-chain/sync/initial-sync/testing:go_default_library",
"//config/params:go_default_library",

View File

@@ -2,6 +2,7 @@ package validator
import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/sync"
)
@@ -11,4 +12,5 @@ type Server struct {
GenesisTimeFetcher blockchain.TimeFetcher
SyncChecker sync.Checker
HeadFetcher blockchain.HeadFetcher
CoreService *core.Service
}

View File

@@ -6,7 +6,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/network"
http2 "github.com/prysmaticlabs/prysm/v4/network/http"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
)
@@ -29,12 +29,6 @@ type ValidatorPerformanceResponse struct {
// GetValidatorPerformance is an HTTP handler for GetValidatorPerformance.
func (vs *Server) GetValidatorPerformance(w http.ResponseWriter, r *http.Request) {
if vs.SyncChecker.Syncing() {
handleHTTPError(w, "Syncing", http.StatusServiceUnavailable)
return
}
ctx := r.Context()
currSlot := vs.GenesisTimeFetcher.CurrentSlot()
var req ValidatorPerformanceRequest
if r.Body != http.NoBody {
if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
@@ -42,14 +36,12 @@ func (vs *Server) GetValidatorPerformance(w http.ResponseWriter, r *http.Request
return
}
}
computed, err := core.ComputeValidatorPerformance(
ctx,
computed, err := vs.CoreService.ComputeValidatorPerformance(
r.Context(),
&ethpb.ValidatorPerformanceRequest{
PublicKeys: req.PublicKeys,
Indices: req.Indices,
},
vs.HeadFetcher,
currSlot,
)
if err != nil {
handleHTTPError(w, "Could not compute validator performance: "+err.Err.Error(), core.ErrorReasonToHTTP(err.Reason))
@@ -66,13 +58,13 @@ func (vs *Server) GetValidatorPerformance(w http.ResponseWriter, r *http.Request
MissingValidators: computed.MissingValidators,
InactivityScores: computed.InactivityScores, // Only populated in Altair
}
network.WriteJson(w, response)
http2.WriteJson(w, response)
}
func handleHTTPError(w http.ResponseWriter, message string, code int) {
errJson := &network.DefaultErrorJson{
errJson := &http2.DefaultErrorJson{
Message: message,
Code: code,
}
network.WriteError(w, errJson)
http2.WriteError(w, errJson)
}

View File
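A rough sketch of the handler shape after this change: decode an optional JSON body, delegate to the core service, and write either the JSON response or a mapped error. The writeJSON and writeError helpers and the compute callback below are stand-ins for http2.WriteJson, handleHTTPError, and vs.CoreService.ComputeValidatorPerformance.

```go
// Sketch of the thin HTTP handler pattern; helper names are hypothetical.
package main

import (
	"encoding/json"
	"net/http"
)

type perfRequest struct {
	PublicKeys [][]byte `json:"public_keys"`
	Indices    []int64  `json:"indices"`
}

type perfResponse struct {
	MissingValidators [][]byte `json:"missing_validators"`
}

func writeJSON(w http.ResponseWriter, v any) {
	w.Header().Set("Content-Type", "application/json")
	_ = json.NewEncoder(w).Encode(v)
}

func writeError(w http.ResponseWriter, msg string, code int) {
	w.Header().Set("Content-Type", "application/json")
	w.WriteHeader(code)
	_ = json.NewEncoder(w).Encode(map[string]any{"message": msg, "code": code})
}

func performanceHandler(compute func(perfRequest) (*perfResponse, error)) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		var req perfRequest
		if r.Body != http.NoBody { // request body is optional
			if err := json.NewDecoder(r.Body).Decode(&req); err != nil {
				writeError(w, "Could not decode request body: "+err.Error(), http.StatusBadRequest)
				return
			}
		}
		resp, err := compute(req)
		if err != nil {
			writeError(w, "Could not compute validator performance: "+err.Error(), http.StatusInternalServerError)
			return
		}
		writeJSON(w, resp)
	}
}

func main() {
	http.Handle("/prysm/validators/performance", performanceHandler(func(perfRequest) (*perfResponse, error) {
		return &perfResponse{}, nil
	}))
	// http.ListenAndServe(":8080", nil) // not started; sketch only
}
```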

@@ -14,6 +14,7 @@ import (
mock "github.com/prysmaticlabs/prysm/v4/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/epoch/precompute"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
mockSync "github.com/prysmaticlabs/prysm/v4/beacon-chain/sync/initial-sync/testing"
"github.com/prysmaticlabs/prysm/v4/config/params"
@@ -28,12 +29,13 @@ import (
func TestServer_GetValidatorPerformance(t *testing.T) {
t.Run("Syncing", func(t *testing.T) {
vs := &Server{
SyncChecker: &mockSync.Sync{IsSyncing: true},
CoreService: &core.Service{
SyncChecker: &mockSync.Sync{IsSyncing: true},
},
}
var buf bytes.Buffer
srv := httptest.NewServer(http.HandlerFunc(vs.GetValidatorPerformance))
req := httptest.NewRequest("POST", "/foo", &buf)
req := httptest.NewRequest("POST", "/foo", nil)
client := &http.Client{}
rawResp, err := client.Post(srv.URL, "application/json", req.Body)
@@ -57,11 +59,13 @@ func TestServer_GetValidatorPerformance(t *testing.T) {
offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
vs := &Server{
HeadFetcher: &mock.ChainService{
State: headState,
CoreService: &core.Service{
HeadFetcher: &mock.ChainService{
State: headState,
},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
SyncChecker: &mockSync.Sync{IsSyncing: false},
},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
want := &ValidatorPerformanceResponse{
PublicKeys: [][]byte{publicKeys[1][:], publicKeys[2][:]},
@@ -111,12 +115,14 @@ func TestServer_GetValidatorPerformance(t *testing.T) {
offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
vs := &Server{
HeadFetcher: &mock.ChainService{
// 10 epochs into the future.
State: headState,
CoreService: &core.Service{
HeadFetcher: &mock.ChainService{
// 10 epochs into the future.
State: headState,
},
SyncChecker: &mockSync.Sync{IsSyncing: false},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
},
SyncChecker: &mockSync.Sync{IsSyncing: false},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
}
c := headState.Copy()
vp, bp, err := precompute.New(ctx, c)
@@ -174,12 +180,14 @@ func TestServer_GetValidatorPerformance(t *testing.T) {
offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
vs := &Server{
HeadFetcher: &mock.ChainService{
// 10 epochs into the future.
State: headState,
CoreService: &core.Service{
HeadFetcher: &mock.ChainService{
// 10 epochs into the future.
State: headState,
},
SyncChecker: &mockSync.Sync{IsSyncing: false},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
},
SyncChecker: &mockSync.Sync{IsSyncing: false},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
}
c := headState.Copy()
vp, bp, err := precompute.New(ctx, c)
@@ -243,11 +251,13 @@ func TestServer_GetValidatorPerformance(t *testing.T) {
require.NoError(t, headState.SetBalances([]uint64{100, 101, 102}))
offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
vs := &Server{
HeadFetcher: &mock.ChainService{
State: headState,
CoreService: &core.Service{
HeadFetcher: &mock.ChainService{
State: headState,
},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
SyncChecker: &mockSync.Sync{IsSyncing: false},
},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
want := &ValidatorPerformanceResponse{
PublicKeys: [][]byte{publicKeys[1][:], publicKeys[2][:]},
@@ -303,11 +313,13 @@ func TestServer_GetValidatorPerformance(t *testing.T) {
require.NoError(t, headState.SetBalances([]uint64{100, 101, 102}))
offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
vs := &Server{
HeadFetcher: &mock.ChainService{
State: headState,
CoreService: &core.Service{
HeadFetcher: &mock.ChainService{
State: headState,
},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
SyncChecker: &mockSync.Sync{IsSyncing: false},
},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
want := &ValidatorPerformanceResponse{
PublicKeys: [][]byte{publicKeys[1][:], publicKeys[2][:]},
@@ -363,11 +375,13 @@ func TestServer_GetValidatorPerformance(t *testing.T) {
require.NoError(t, headState.SetBalances([]uint64{100, 101, 102}))
offset := int64(headState.Slot().Mul(params.BeaconConfig().SecondsPerSlot))
vs := &Server{
HeadFetcher: &mock.ChainService{
State: headState,
CoreService: &core.Service{
HeadFetcher: &mock.ChainService{
State: headState,
},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
SyncChecker: &mockSync.Sync{IsSyncing: false},
},
GenesisTimeFetcher: &mock.ChainService{Genesis: time.Now().Add(time.Duration(-1*offset) * time.Second)},
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
want := &ValidatorPerformanceResponse{
PublicKeys: [][]byte{publicKeys[1][:], publicKeys[2][:]},

View File

@@ -30,6 +30,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/synccommittee"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/operations/voluntaryexits"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/core"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/beacon"
rpcBuilder "github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/builder"
"github.com/prysmaticlabs/prysm/v4/beacon-chain/rpc/eth/debug"
@@ -231,9 +232,19 @@ func (s *Service) Start() {
}
s.cfg.Router.HandleFunc("/eth/v1/builder/states/{state_id}/expected_withdrawals", builderServer.ExpectedWithdrawals).Methods(http.MethodGet)
coreService := &core.Service{
HeadFetcher: s.cfg.HeadFetcher,
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
SyncChecker: s.cfg.SyncService,
Broadcaster: s.cfg.Broadcaster,
SyncCommitteePool: s.cfg.SyncCommitteeObjectPool,
OperationNotifier: s.cfg.OperationNotifier,
AttestationCache: cache.NewAttestationCache(),
StateGen: s.cfg.StateGen,
}
validatorServer := &validatorv1alpha1.Server{
Ctx: s.ctx,
AttestationCache: cache.NewAttestationCache(),
AttPool: s.cfg.AttestationsPool,
ExitPool: s.cfg.ExitPool,
HeadFetcher: s.cfg.HeadFetcher,
@@ -266,6 +277,7 @@ func (s *Service) Start() {
BlockBuilder: s.cfg.BlockBuilder,
BLSChangesPool: s.cfg.BLSChangesPool,
ClockWaiter: s.cfg.ClockWaiter,
CoreService: coreService,
}
validatorServerV1 := &validator.Server{
HeadFetcher: s.cfg.HeadFetcher,
@@ -282,8 +294,17 @@ func (s *Service) Start() {
ChainInfoFetcher: s.cfg.ChainInfoFetcher,
BeaconDB: s.cfg.BeaconDB,
BlockBuilder: s.cfg.BlockBuilder,
OperationNotifier: s.cfg.OperationNotifier,
CoreService: coreService,
}
s.cfg.Router.HandleFunc("/eth/v1/validator/aggregate_attestation", validatorServerV1.GetAggregateAttestation).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/eth/v1/validator/contribution_and_proofs", validatorServerV1.SubmitContributionAndProofs).Methods(http.MethodPost)
s.cfg.Router.HandleFunc("/eth/v1/validator/aggregate_and_proofs", validatorServerV1.SubmitAggregateAndProofs).Methods(http.MethodPost)
s.cfg.Router.HandleFunc("/eth/v1/validator/sync_committee_subscriptions", validatorServerV1.SubmitSyncCommitteeSubscription).Methods(http.MethodPost)
s.cfg.Router.HandleFunc("/eth/v1/validator/beacon_committee_subscriptions", validatorServerV1.SubmitBeaconCommitteeSubscription).Methods(http.MethodPost)
s.cfg.Router.HandleFunc("/eth/v1/validator/attestation_data", validatorServerV1.GetAttestationData).Methods(http.MethodGet)
nodeServer := &nodev1alpha1.Server{
LogsStreamer: logs.NewStreamServer(),
StreamLogsBufferSize: 1000, // Enough to handle bursts of beacon node logs for gRPC streaming.
@@ -298,7 +319,7 @@ func (s *Service) Start() {
BeaconMonitoringHost: s.cfg.BeaconMonitoringHost,
BeaconMonitoringPort: s.cfg.BeaconMonitoringPort,
}
nodeServerV1 := &node.Server{
nodeServerEth := &node.Server{
BeaconDB: s.cfg.BeaconDB,
Server: s.grpcServer,
SyncChecker: s.cfg.SyncService,
@@ -311,6 +332,8 @@ func (s *Service) Start() {
ExecutionChainInfoFetcher: s.cfg.ExecutionChainInfoFetcher,
}
s.cfg.Router.HandleFunc("/eth/v1/node/syncing", nodeServerEth.GetSyncStatus).Methods(http.MethodGet)
nodeServerPrysm := &nodeprysm.Server{
BeaconDB: s.cfg.BeaconDB,
SyncChecker: s.cfg.SyncService,
@@ -349,6 +372,7 @@ func (s *Service) Start() {
ReceivedAttestationsBuffer: make(chan *ethpbv1alpha1.Attestation, attestationBufferSize),
CollectedAttestationsBuffer: make(chan []*ethpbv1alpha1.Attestation, attestationBufferSize),
ReplayerBuilder: ch,
CoreService: coreService,
}
beaconChainServerV1 := &beacon.Server{
CanonicalHistory: ch,
@@ -379,12 +403,13 @@ func (s *Service) Start() {
GenesisTimeFetcher: s.cfg.GenesisTimeFetcher,
HeadFetcher: s.cfg.HeadFetcher,
SyncChecker: s.cfg.SyncService,
CoreService: coreService,
}
s.cfg.Router.HandleFunc("/prysm/validators/performance", httpServer.GetValidatorPerformance).Methods(http.MethodGet)
s.cfg.Router.HandleFunc("/prysm/validators/performance", httpServer.GetValidatorPerformance).Methods(http.MethodPost)
s.cfg.Router.HandleFunc("/eth/v2/beacon/blocks", beaconChainServerV1.PublishBlockV2).Methods(http.MethodPost)
s.cfg.Router.HandleFunc("/eth/v2/beacon/blinded_blocks", beaconChainServerV1.PublishBlindedBlockV2).Methods(http.MethodPost)
ethpbv1alpha1.RegisterNodeServer(s.grpcServer, nodeServer)
ethpbservice.RegisterBeaconNodeServer(s.grpcServer, nodeServerV1)
ethpbservice.RegisterBeaconNodeServer(s.grpcServer, nodeServerEth)
ethpbv1alpha1.RegisterHealthServer(s.grpcServer, nodeServer)
ethpbv1alpha1.RegisterBeaconChainServer(s.grpcServer, beaconChainServer)
ethpbservice.RegisterBeaconChainServer(s.grpcServer, beaconChainServerV1)

View File
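A small sketch of the wiring pattern introduced in Start() above: one shared core service value is built first and injected into several servers, and the new HTTP routes are registered with explicit methods. The router style matches the HandleFunc(...).Methods(...) chaining of gorilla/mux seen in the diff; the server types below are stand-ins, not the Prysm servers.

```go
// Sketch: shared service injection plus method-scoped route registration.
package main

import (
	"net/http"

	"github.com/gorilla/mux"
)

type coreService struct{ name string }

type validatorServer struct{ core *coreService }

func (s *validatorServer) GetAttestationData(w http.ResponseWriter, _ *http.Request) {
	w.Write([]byte(s.core.name))
}

type prysmServer struct{ core *coreService }

func (s *prysmServer) GetValidatorPerformance(w http.ResponseWriter, _ *http.Request) {
	w.Write([]byte(s.core.name))
}

func main() {
	core := &coreService{name: "shared core service"} // built once
	v := &validatorServer{core: core}                 // same instance injected everywhere
	p := &prysmServer{core: core}

	r := mux.NewRouter()
	r.HandleFunc("/eth/v1/validator/attestation_data", v.GetAttestationData).Methods(http.MethodGet)
	r.HandleFunc("/prysm/validators/performance", p.GetValidatorPerformance).Methods(http.MethodPost)
	// http.ListenAndServe(":8080", r) // not started; sketch only
}
```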

@@ -6,6 +6,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/consensus-types/interfaces"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
)
// MockBlocker is a fake implementation of lookup.Blocker.
@@ -13,6 +14,7 @@ type MockBlocker struct {
BlockToReturn interfaces.ReadOnlySignedBeaconBlock
ErrorToReturn error
SlotBlockMap map[primitives.Slot]interfaces.ReadOnlySignedBeaconBlock
RootBlockMap map[[32]byte]interfaces.ReadOnlySignedBeaconBlock
}
// Block --
@@ -20,10 +22,13 @@ func (m *MockBlocker) Block(_ context.Context, b []byte) (interfaces.ReadOnlySig
if m.ErrorToReturn != nil {
return nil, m.ErrorToReturn
}
if m.BlockToReturn != nil {
return m.BlockToReturn, nil
}
slotNumber, parseErr := strconv.ParseUint(string(b), 10, 64)
if parseErr != nil {
//nolint:nilerr
return m.BlockToReturn, nil
return m.RootBlockMap[bytesutil.ToBytes32(b)], nil
}
return m.SlotBlockMap[primitives.Slot(slotNumber)], nil
}

View File

@@ -5,6 +5,7 @@ import (
"github.com/prysmaticlabs/prysm/v4/beacon-chain/state"
"github.com/prysmaticlabs/prysm/v4/consensus-types/primitives"
"github.com/prysmaticlabs/prysm/v4/encoding/bytesutil"
)
// MockStater is a fake implementation of lookup.Stater.
@@ -12,11 +13,15 @@ type MockStater struct {
BeaconState state.BeaconState
BeaconStateRoot []byte
StatesBySlot map[primitives.Slot]state.BeaconState
StatesByRoot map[[32]byte]state.BeaconState
}
// State --
func (m *MockStater) State(context.Context, []byte) (state.BeaconState, error) {
return m.BeaconState, nil
func (m *MockStater) State(_ context.Context, id []byte) (state.BeaconState, error) {
if m.BeaconState != nil {
return m.BeaconState, nil
}
return m.StatesByRoot[bytesutil.ToBytes32(id)], nil
}
// StateRoot --

View File

@@ -5,6 +5,7 @@ package state
import (
"context"
"encoding/json"
"github.com/prysmaticlabs/go-bitfield"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
@@ -23,6 +24,7 @@ type BeaconState interface {
CopyAllTries()
HashTreeRoot(ctx context.Context) ([32]byte, error)
StateProver
json.Marshaler
}
// SpecParametersProvider provides fork-specific configuration parameters as

View File

@@ -3,6 +3,7 @@
package state_native
import (
"encoding/json"
"sync"
"github.com/prysmaticlabs/go-bitfield"
@@ -60,3 +61,76 @@ type BeaconState struct {
merkleLayers [][][]byte
sharedFieldReferences map[types.FieldIndex]*stateutil.Reference
}
type beaconStateMarshalable struct {
Version int `json:"version" yaml:"version"`
GenesisTime uint64 `json:"genesis_time" yaml:"genesis_time"`
GenesisValidatorsRoot [32]byte `json:"genesis_validators_root" yaml:"genesis_validators_root"`
Slot primitives.Slot `json:"slot" yaml:"slot"`
Fork *ethpb.Fork `json:"fork" yaml:"fork"`
LatestBlockHeader *ethpb.BeaconBlockHeader `json:"latest_block_header" yaml:"latest_block_header"`
BlockRoots *customtypes.BlockRoots `json:"block_roots" yaml:"block_roots"`
StateRoots *customtypes.StateRoots `json:"state_roots" yaml:"state_roots"`
HistoricalRoots customtypes.HistoricalRoots `json:"historical_roots" yaml:"historical_roots"`
HistoricalSummaries []*ethpb.HistoricalSummary `json:"historical_summaries" yaml:"historical_summaries"`
Eth1Data *ethpb.Eth1Data `json:"eth_1_data" yaml:"eth_1_data"`
Eth1DataVotes []*ethpb.Eth1Data `json:"eth_1_data_votes" yaml:"eth_1_data_votes"`
Eth1DepositIndex uint64 `json:"eth_1_deposit_index" yaml:"eth_1_deposit_index"`
Validators []*ethpb.Validator `json:"validators" yaml:"validators"`
Balances []uint64 `json:"balances" yaml:"balances"`
RandaoMixes *customtypes.RandaoMixes `json:"randao_mixes" yaml:"randao_mixes"`
Slashings []uint64 `json:"slashings" yaml:"slashings"`
PreviousEpochAttestations []*ethpb.PendingAttestation `json:"previous_epoch_attestations" yaml:"previous_epoch_attestations"`
CurrentEpochAttestations []*ethpb.PendingAttestation `json:"current_epoch_attestations" yaml:"current_epoch_attestations"`
PreviousEpochParticipation []byte `json:"previous_epoch_participation" yaml:"previous_epoch_participation"`
CurrentEpochParticipation []byte `json:"current_epoch_participation" yaml:"current_epoch_participation"`
JustificationBits bitfield.Bitvector4 `json:"justification_bits" yaml:"justification_bits"`
PreviousJustifiedCheckpoint *ethpb.Checkpoint `json:"previous_justified_checkpoint" yaml:"previous_justified_checkpoint"`
CurrentJustifiedCheckpoint *ethpb.Checkpoint `json:"current_justified_checkpoint" yaml:"current_justified_checkpoint"`
FinalizedCheckpoint *ethpb.Checkpoint `json:"finalized_checkpoint" yaml:"finalized_checkpoint"`
InactivityScores []uint64 `json:"inactivity_scores" yaml:"inactivity_scores"`
CurrentSyncCommittee *ethpb.SyncCommittee `json:"current_sync_committee" yaml:"current_sync_committee"`
NextSyncCommittee *ethpb.SyncCommittee `json:"next_sync_committee" yaml:"next_sync_committee"`
LatestExecutionPayloadHeader *enginev1.ExecutionPayloadHeader `json:"latest_execution_payload_header" yaml:"latest_execution_payload_header"`
LatestExecutionPayloadHeaderCapella *enginev1.ExecutionPayloadHeaderCapella `json:"latest_execution_payload_header_capella" yaml:"latest_execution_payload_header_capella"`
NextWithdrawalIndex uint64 `json:"next_withdrawal_index" yaml:"next_withdrawal_index"`
NextWithdrawalValidatorIndex primitives.ValidatorIndex `json:"next_withdrawal_validator_index" yaml:"next_withdrawal_validator_index"`
}
func (b *BeaconState) MarshalJSON() ([]byte, error) {
marshalable := &beaconStateMarshalable{
Version: b.version,
GenesisTime: b.genesisTime,
GenesisValidatorsRoot: b.genesisValidatorsRoot,
Slot: b.slot,
Fork: b.fork,
LatestBlockHeader: b.latestBlockHeader,
BlockRoots: b.blockRoots,
StateRoots: b.stateRoots,
HistoricalRoots: b.historicalRoots,
HistoricalSummaries: b.historicalSummaries,
Eth1Data: b.eth1Data,
Eth1DataVotes: b.eth1DataVotes,
Eth1DepositIndex: b.eth1DepositIndex,
Validators: b.validators,
Balances: b.balances,
RandaoMixes: b.randaoMixes,
Slashings: b.slashings,
PreviousEpochAttestations: b.previousEpochAttestations,
CurrentEpochAttestations: b.currentEpochAttestations,
PreviousEpochParticipation: b.previousEpochParticipation,
CurrentEpochParticipation: b.currentEpochParticipation,
JustificationBits: b.justificationBits,
PreviousJustifiedCheckpoint: b.previousJustifiedCheckpoint,
CurrentJustifiedCheckpoint: b.currentJustifiedCheckpoint,
FinalizedCheckpoint: b.finalizedCheckpoint,
InactivityScores: b.inactivityScores,
CurrentSyncCommittee: b.currentSyncCommittee,
NextSyncCommittee: b.nextSyncCommittee,
LatestExecutionPayloadHeader: b.latestExecutionPayloadHeader,
LatestExecutionPayloadHeaderCapella: b.latestExecutionPayloadHeaderCapella,
NextWithdrawalIndex: b.nextWithdrawalIndex,
NextWithdrawalValidatorIndex: b.nextWithdrawalValidatorIndex,
}
return json.Marshal(marshalable)
}

View File

@@ -3,6 +3,7 @@
package state_native
import (
"encoding/json"
"sync"
"github.com/prysmaticlabs/go-bitfield"
@@ -60,3 +61,76 @@ type BeaconState struct {
merkleLayers [][][]byte
sharedFieldReferences map[types.FieldIndex]*stateutil.Reference
}
type beaconStateMarshalable struct {
Version int `json:"version" yaml:"version"`
GenesisTime uint64 `json:"genesis_time" yaml:"genesis_time"`
GenesisValidatorsRoot [32]byte `json:"genesis_validators_root" yaml:"genesis_validators_root"`
Slot primitives.Slot `json:"slot" yaml:"slot"`
Fork *ethpb.Fork `json:"fork" yaml:"fork"`
LatestBlockHeader *ethpb.BeaconBlockHeader `json:"latest_block_header" yaml:"latest_block_header"`
BlockRoots *customtypes.BlockRoots `json:"block_roots" yaml:"block_roots"`
StateRoots *customtypes.StateRoots `json:"state_roots" yaml:"state_roots"`
HistoricalRoots customtypes.HistoricalRoots `json:"historical_roots" yaml:"historical_roots"`
HistoricalSummaries []*ethpb.HistoricalSummary `json:"historical_summaries" yaml:"historical_summaries"`
Eth1Data *ethpb.Eth1Data `json:"eth_1_data" yaml:"eth_1_data"`
Eth1DataVotes []*ethpb.Eth1Data `json:"eth_1_data_votes" yaml:"eth_1_data_votes"`
Eth1DepositIndex uint64 `json:"eth_1_deposit_index" yaml:"eth_1_deposit_index"`
Validators []*ethpb.Validator `json:"validators" yaml:"validators"`
Balances []uint64 `json:"balances" yaml:"balances"`
RandaoMixes *customtypes.RandaoMixes `json:"randao_mixes" yaml:"randao_mixes"`
Slashings []uint64 `json:"slashings" yaml:"slashings"`
PreviousEpochAttestations []*ethpb.PendingAttestation `json:"previous_epoch_attestations" yaml:"previous_epoch_attestations"`
CurrentEpochAttestations []*ethpb.PendingAttestation `json:"current_epoch_attestations" yaml:"current_epoch_attestations"`
PreviousEpochParticipation []byte `json:"previous_epoch_participation" yaml:"previous_epoch_participation"`
CurrentEpochParticipation []byte `json:"current_epoch_participation" yaml:"current_epoch_participation"`
JustificationBits bitfield.Bitvector4 `json:"justification_bits" yaml:"justification_bits"`
PreviousJustifiedCheckpoint *ethpb.Checkpoint `json:"previous_justified_checkpoint" yaml:"previous_justified_checkpoint"`
CurrentJustifiedCheckpoint *ethpb.Checkpoint `json:"current_justified_checkpoint" yaml:"current_justified_checkpoint"`
FinalizedCheckpoint *ethpb.Checkpoint `json:"finalized_checkpoint" yaml:"finalized_checkpoint"`
InactivityScores []uint64 `json:"inactivity_scores" yaml:"inactivity_scores"`
CurrentSyncCommittee *ethpb.SyncCommittee `json:"current_sync_committee" yaml:"current_sync_committee"`
NextSyncCommittee *ethpb.SyncCommittee `json:"next_sync_committee" yaml:"next_sync_committee"`
LatestExecutionPayloadHeader *enginev1.ExecutionPayloadHeader `json:"latest_execution_payload_header" yaml:"latest_execution_payload_header"`
LatestExecutionPayloadHeaderCapella *enginev1.ExecutionPayloadHeaderCapella `json:"latest_execution_payload_header_capella" yaml:"latest_execution_payload_header_capella"`
NextWithdrawalIndex uint64 `json:"next_withdrawal_index" yaml:"next_withdrawal_index"`
NextWithdrawalValidatorIndex primitives.ValidatorIndex `json:"next_withdrawal_validator_index" yaml:"next_withdrawal_validator_index"`
}
func (b *BeaconState) MarshalJSON() ([]byte, error) {
marshalable := &beaconStateMarshalable{
Version: b.version,
GenesisTime: b.genesisTime,
GenesisValidatorsRoot: b.genesisValidatorsRoot,
Slot: b.slot,
Fork: b.fork,
LatestBlockHeader: b.latestBlockHeader,
BlockRoots: b.blockRoots,
StateRoots: b.stateRoots,
HistoricalRoots: b.historicalRoots,
HistoricalSummaries: b.historicalSummaries,
Eth1Data: b.eth1Data,
Eth1DataVotes: b.eth1DataVotes,
Eth1DepositIndex: b.eth1DepositIndex,
Validators: b.validators,
Balances: b.balances,
RandaoMixes: b.randaoMixes,
Slashings: b.slashings,
PreviousEpochAttestations: b.previousEpochAttestations,
CurrentEpochAttestations: b.currentEpochAttestations,
PreviousEpochParticipation: b.previousEpochParticipation,
CurrentEpochParticipation: b.currentEpochParticipation,
JustificationBits: b.justificationBits,
PreviousJustifiedCheckpoint: b.previousJustifiedCheckpoint,
CurrentJustifiedCheckpoint: b.currentJustifiedCheckpoint,
FinalizedCheckpoint: b.finalizedCheckpoint,
InactivityScores: b.inactivityScores,
CurrentSyncCommittee: b.currentSyncCommittee,
NextSyncCommittee: b.nextSyncCommittee,
LatestExecutionPayloadHeader: b.latestExecutionPayloadHeader,
LatestExecutionPayloadHeaderCapella: b.latestExecutionPayloadHeaderCapella,
NextWithdrawalIndex: b.nextWithdrawalIndex,
NextWithdrawalValidatorIndex: b.nextWithdrawalValidatorIndex,
}
return json.Marshal(marshalable)
}

View File
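Both state-native files above add the same MarshalJSON: the unexported state fields are copied into an exported mirror struct with JSON tags and handed to json.Marshal, which is how the new json.Marshaler requirement on the BeaconState interface is satisfied. A toy illustration of the pattern (the state type and fields are stand-ins, not the Prysm beacon state):

```go
// Sketch: satisfying json.Marshaler for a type with unexported fields via a mirror struct.
package main

import (
	"encoding/json"
	"fmt"
)

type state struct {
	slot        uint64
	genesisTime uint64
}

type stateMarshalable struct {
	Slot        uint64 `json:"slot"`
	GenesisTime uint64 `json:"genesis_time"`
}

func (s *state) MarshalJSON() ([]byte, error) {
	// Copy unexported fields into the exported mirror and delegate.
	return json.Marshal(&stateMarshalable{Slot: s.slot, GenesisTime: s.genesisTime})
}

func main() {
	b, err := json.Marshal(&state{slot: 42, genesisTime: 1606824023})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // {"slot":42,"genesis_time":1606824023}
}
```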

@@ -23,12 +23,12 @@ var _ = state.ReadOnlyValidator(readOnlyValidator{})
// NewValidator initializes the read only wrapper for validator.
func NewValidator(v *ethpb.Validator) (state.ReadOnlyValidator, error) {
if v == nil {
return nil, ErrNilWrappedValidator
}
rov := readOnlyValidator{
validator: v,
}
if rov.IsNil() {
return nil, ErrNilWrappedValidator
}
return rov, nil
}

View File

@@ -75,6 +75,8 @@ func (_ *State) replayBlocks(
"duration": duration,
}).Debug("Replayed state")
replayBlocksSummary.Observe(float64(duration.Milliseconds()))
return state, nil
}

View File

@@ -34,6 +34,7 @@ go_library(
"//math:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
],
)

View File

@@ -3,12 +3,15 @@ package stateutil
import (
"bytes"
"encoding/binary"
"runtime"
"sync"
"github.com/pkg/errors"
fieldparams "github.com/prysmaticlabs/prysm/v4/config/fieldparams"
"github.com/prysmaticlabs/prysm/v4/crypto/hash/htr"
"github.com/prysmaticlabs/prysm/v4/encoding/ssz"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/sirupsen/logrus"
)
const (
@@ -51,6 +54,20 @@ func validatorRegistryRoot(validators []*ethpb.Validator) ([32]byte, error) {
return res, nil
}
func hashValidatorHelper(validators []*ethpb.Validator, roots [][32]byte, j int, groupSize int, wg *sync.WaitGroup) {
defer wg.Done()
for i := 0; i < groupSize; i++ {
fRoots, err := ValidatorFieldRoots(validators[j*groupSize+i])
if err != nil {
logrus.WithError(err).Error("could not get validator field roots")
return
}
for k, root := range fRoots {
roots[(j*groupSize+i)*validatorFieldRoots+k] = root
}
}
}
// OptimizedValidatorRoots uses an optimized routine with gohashtree in order to
// derive a list of validator roots from a list of validator objects.
func OptimizedValidatorRoots(validators []*ethpb.Validator) ([][32]byte, error) {
@@ -58,14 +75,25 @@ func OptimizedValidatorRoots(validators []*ethpb.Validator) ([][32]byte, error)
if len(validators) == 0 {
return [][32]byte{}, nil
}
roots := make([][32]byte, 0, len(validators)*validatorFieldRoots)
for i := 0; i < len(validators); i++ {
wg := sync.WaitGroup{}
n := runtime.GOMAXPROCS(0)
rootsSize := len(validators) * validatorFieldRoots
groupSize := len(validators) / n
roots := make([][32]byte, rootsSize)
wg.Add(n - 1)
for j := 0; j < n-1; j++ {
go hashValidatorHelper(validators, roots, j, groupSize, &wg)
}
for i := (n - 1) * groupSize; i < len(validators); i++ {
fRoots, err := ValidatorFieldRoots(validators[i])
if err != nil {
return [][32]byte{}, errors.Wrap(err, "could not compute validators merkleization")
}
roots = append(roots, fRoots...)
for k, root := range fRoots {
roots[i*validatorFieldRoots+k] = root
}
}
wg.Wait()
// A validator's tree can be represented with a depth of 3, as log2(8) = 3.
// Using this property we can lay out all the individual fields of a
@@ -73,9 +101,7 @@ func OptimizedValidatorRoots(validators []*ethpb.Validator) ([][32]byte, error)
for i := 0; i < validatorTreeDepth; i++ {
// Overwrite input lists as we are hashing by level
// and only need the highest level to proceed.
outputLen := len(roots) / 2
htr.VectorizedSha256(roots, roots)
roots = roots[:outputLen]
roots = htr.VectorizedSha256(roots)
}
return roots, nil
}

View File
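A compact sketch of the partitioning scheme used by the new OptimizedValidatorRoots: n-1 goroutines, with n = runtime.GOMAXPROCS(0), each fill one contiguous group of a preallocated roots slice, while the calling goroutine handles the final group plus the remainder left by integer division, so no two writers touch the same index. hashItem stands in for the per-validator field-root computation.

```go
// Sketch of disjoint-range parallel hashing into a preallocated slice.
package main

import (
	"fmt"
	"runtime"
	"sync"
)

func hashItem(i int) [32]byte { // stand-in for the real per-validator hashing
	var r [32]byte
	r[0] = byte(i)
	return r
}

func parallelHash(items int) [][32]byte {
	roots := make([][32]byte, items)
	n := runtime.GOMAXPROCS(0)
	groupSize := items / n
	var wg sync.WaitGroup
	wg.Add(n - 1)
	for j := 0; j < n-1; j++ {
		j := j
		go func() {
			defer wg.Done()
			for i := j * groupSize; i < (j+1)*groupSize; i++ {
				roots[i] = hashItem(i) // each goroutine writes only its own range
			}
		}()
	}
	// The caller takes the last group and the remainder left by integer division.
	for i := (n - 1) * groupSize; i < items; i++ {
		roots[i] = hashItem(i)
	}
	wg.Wait()
	return roots
}

func main() {
	fmt.Println(len(parallelHash(1000)))
}
```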

@@ -3,11 +3,13 @@ package stateutil
import (
"reflect"
"strings"
"sync"
"testing"
mathutil "github.com/prysmaticlabs/prysm/v4/math"
ethpb "github.com/prysmaticlabs/prysm/v4/proto/prysm/v1alpha1"
"github.com/prysmaticlabs/prysm/v4/testing/assert"
"github.com/prysmaticlabs/prysm/v4/testing/require"
)
func TestValidatorConstants(t *testing.T) {
@@ -30,3 +32,28 @@ func TestValidatorConstants(t *testing.T) {
_, err := ValidatorRegistryRoot([]*ethpb.Validator{v})
assert.NoError(t, err)
}
func TestHashValidatorHelper(t *testing.T) {
wg := sync.WaitGroup{}
wg.Add(1)
v := &ethpb.Validator{}
valList := make([]*ethpb.Validator, 10*validatorFieldRoots)
for i := range valList {
valList[i] = v
}
roots := make([][32]byte, len(valList))
hashValidatorHelper(valList, roots, 2, 2, &wg)
for i := 0; i < 4*validatorFieldRoots; i++ {
require.Equal(t, [32]byte{}, roots[i])
}
emptyValRoots, err := ValidatorFieldRoots(v)
require.NoError(t, err)
for i := 4; i < 6; i++ {
for j := 0; j < validatorFieldRoots; j++ {
require.Equal(t, emptyValRoots[j], roots[i*validatorFieldRoots+j])
}
}
for i := 6 * validatorFieldRoots; i < 10*validatorFieldRoots; i++ {
require.Equal(t, [32]byte{}, roots[i])
}
}

View File

@@ -48,8 +48,7 @@ func merkleizePubkey(pubkey []byte) ([32]byte, error) {
if err != nil {
return [32]byte{}, err
}
outputChunk := make([][32]byte, 1)
htr.VectorizedSha256(chunks, outputChunk)
outputChunk := htr.VectorizedSha256(chunks)
return outputChunk[0], nil
}

View File

@@ -71,9 +71,7 @@ func ReturnTrieLayerVariable(elements [][32]byte, length uint64) [][]*[32]byte {
}
layers[i+1] = make([]*[32]byte, layerLen/2)
newElems := make([][32]byte, layerLen/2)
htr.VectorizedSha256(elements, newElems)
elements = newElems
elements = htr.VectorizedSha256(elements)
for j := range elements {
layers[i+1][j] = &elements[j]
}
@@ -295,9 +293,7 @@ func MerkleizeTrieLeaves(layers [][][32]byte, hashLayer [][32]byte) ([][][32]byt
if !math.IsPowerOf2(uint64(len(hashLayer))) {
return nil, nil, errors.Errorf("hash layer is a non power of 2: %d", len(hashLayer))
}
newLayer := make([][32]byte, len(hashLayer)/2)
htr.VectorizedSha256(hashLayer, newLayer)
hashLayer = newLayer
hashLayer = htr.VectorizedSha256(hashLayer)
layers[i] = hashLayer
i++
}

View File
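These call sites reflect the updated htr.VectorizedSha256 contract: it now returns a fresh slice half the length of its input, one hash per adjacent pair of 32-byte chunks, so each tree level is produced by layer = htr.VectorizedSha256(layer). The sketch below mimics that contract with crypto/sha256 for illustration; the real routine is the vectorized gohashtree implementation.

```go
// Sketch: pairwise hashing that halves the input, repeated log2(n) times for a root.
package main

import (
	"crypto/sha256"
	"fmt"
)

func pairwiseSha256(chunks [][32]byte) [][32]byte {
	out := make([][32]byte, len(chunks)/2)
	for i := 0; i < len(out); i++ {
		h := sha256.New()
		h.Write(chunks[2*i][:])
		h.Write(chunks[2*i+1][:])
		copy(out[i][:], h.Sum(nil))
	}
	return out
}

func main() {
	// Eight leaves -> three pairwise rounds -> one root (log2(8) = 3).
	leaves := make([][32]byte, 8)
	for i := range leaves {
		leaves[i][0] = byte(i)
	}
	for len(leaves) > 1 {
		leaves = pairwiseSha256(leaves)
	}
	fmt.Printf("root: %x\n", leaves[0])
}
```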

@@ -89,6 +89,12 @@ func (s *Service) Start() {
log.Debug("Exiting Initial Sync Service")
return
}
// Skip round-robin initial sync if the minimum number of peers required to sync is 0.
if flags.Get().MinimumSyncPeers == 0 {
s.markSynced()
log.WithField("genesisTime", gt).Info("Due to number of peers required for sync being set at 0, entering regular sync immediately.")
return
}
if gt.After(prysmTime.Now()) {
s.markSynced()
log.WithField("genesisTime", gt).Info("Genesis time has not arrived - not syncing")

View File

@@ -31,6 +31,14 @@ func TestService_Constants(t *testing.T) {
func TestService_InitStartStop(t *testing.T) {
hook := logTest.NewGlobal()
resetFlags := flags.Get()
flags.Init(&flags.GlobalFlags{
MinimumSyncPeers: 1,
})
defer func() {
flags.Init(resetFlags)
}()
tests := []struct {
name string
assert func()

View File

@@ -125,6 +125,12 @@ var (
Help: "Time to verify gossiped blocks",
},
)
blockArrivalGossipSummary = promauto.NewSummary(
prometheus.SummaryOpts{
Name: "gossip_block_arrival_milliseconds",
Help: "Time for gossiped blocks to arrive",
},
)
// Sync committee verification performance.
syncMessagesForUnknownBlocks = promauto.NewCounter(
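The new blockArrivalGossipSummary follows the usual promauto pattern; a hedged sketch of how such a summary is typically fed (the receiving function and the slot-start reference point are assumptions for illustration, not part of this diff):

```go
// Sketch: observing gossip block arrival latency into a Prometheus summary.
package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promauto"
)

var blockArrivalGossipSummary = promauto.NewSummary(prometheus.SummaryOpts{
	Name: "gossip_block_arrival_milliseconds",
	Help: "Time for gossiped blocks to arrive",
})

func onGossipBlock(slotStart time.Time) {
	// Record arrival latency relative to the slot start, in milliseconds.
	blockArrivalGossipSummary.Observe(float64(time.Since(slotStart).Milliseconds()))
}

func main() {
	onGossipBlock(time.Now().Add(-250 * time.Millisecond))
}
```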

Some files were not shown because too many files have changed in this diff.