Compare commits

135 Commits

Author SHA1 Message Date
terence tsao
b4437e6cec Load graffiti from file (#8041)
* Pass graffiti file and use it

* Visibility

* Parse test

* More proposal tests

* Gazelle

* Add sequential functionality

* fix length check

* Update priorities: specified -> random -> default (sketched after this entry)

* Log warning instead of returning err

* Comment

* E2e test

* Comment

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2020-12-04 23:15:12 +00:00
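
The priority ordering in the entry above is the heart of the change. A minimal sketch of that fallback logic, using hypothetical type and field names rather than Prysm's actual ones:

```go
package main

import (
	"fmt"
	"math/rand"
)

// graffitiPicker is a hypothetical stand-in for the parsed graffiti file.
type graffitiPicker struct {
	Specified []string // ordered entries, consumed sequentially
	Random    []string
	Default   string
}

// pick applies the priority from the commit: specified -> random -> default.
func (g *graffitiPicker) pick() string {
	if len(g.Specified) > 0 {
		v := g.Specified[0]
		g.Specified = g.Specified[1:] // the PR's "sequential functionality"
		return v
	}
	if len(g.Random) > 0 {
		return g.Random[rand.Intn(len(g.Random))]
	}
	return g.Default
}

func main() {
	g := &graffitiPicker{
		Specified: []string{"first block"},
		Random:    []string{"hello", "world"},
		Default:   "prysm",
	}
	fmt.Println(g.pick()) // "first block"
	fmt.Println(g.pick()) // "hello" or "world"
}
```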
Arthur Burkart
8ad328d9b3 fix(grpcHeaders): accept values with "=" symbols (#8047)
* fix(grpcHeaders): accept values with equal signs

# What

Before this commit, it was not possible to pass in base64-encoded
content as a header value if it contained an equals sign. This commit
changes the behavior slightly. Rather than ignoring key/value pairs whose
value happens to contain an equals sign, we assume the first equals sign
delimits the key from the value and pass the rest of the value through
as-is.

Also, instead of printing the header name along with its value, we now
print only the name, so there is less risk of leaking sensitive information
into logs where it does not belong.

# Testing

This has not been tested in the context of the full validator client.
Instead, a small example was made to demonstrate the feasibility. The
example is shown here:

```go
package main

import (
	"log"
	"os"
	"strings"

	"github.com/davecgh/go-spew/spew"
	"github.com/urfave/cli/v2"
)

type Config struct {
	GrpcHeadersFlag string
}

func main() {
	app := &cli.App{
		Action: func(c *cli.Context) error {
			for _, hdr := range strings.Split(c.String("grpc-headers"), ",") {
				if hdr != "" {
					ss := strings.Split(hdr, "=")
					spew.Dump(ss[0])
					spew.Dump(strings.Join(ss[1:], "="))
				}
			}
			return nil
		},
		Flags: []cli.Flag{
			&cli.StringFlag{
				Name: "grpc-headers",
				Usage: "A comma-separated list of key value pairs to pass as gRPC headers for all gRPC " +
					"calls. Example: --grpc-headers=key=value",
			},
		},
	}

	err := app.Run(os.Args)
	if err != nil {
		log.Fatal(err)
	}
}
```

Example invocation:

```command
❯ go run main.go --grpc-headers=key=value,Authorization="Basic $(echo -n hello:world | base64)"
(string) (len=3) "key"
(string) (len=5) "value"
(string) (len=13) "Authorization"
(string) (len=22) "Basic aGVsbG86d29ybGQ="
```
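
As an aside, the same first-delimiter behavior can be expressed with the standard library's `strings.SplitN`; a self-contained equivalent of the loop body above:

```go
package main

import (
	"fmt"
	"strings"
)

// parseHeader splits a "key=value" pair on the first "=" only, so values
// containing "=" (such as base64 padding) survive intact.
func parseHeader(hdr string) (key, value string) {
	parts := strings.SplitN(hdr, "=", 2)
	key = parts[0]
	if len(parts) == 2 {
		value = parts[1]
	}
	return key, value
}

func main() {
	k, v := parseHeader("Authorization=Basic aGVsbG86d29ybGQ=")
	fmt.Println(k) // Authorization
	fmt.Println(v) // Basic aGVsbG86d29ybGQ=
}
```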

* Adds tests to new gRPC header parsing code

* bazel run //:gazelle
2020-12-04 21:31:15 +00:00
Victor Farazdagi
21ede7634e Attestation aggregation: maxcover vs naive aggregation effectiveness test (#8043)
* maxcover performance test

* gazelle

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-12-04 17:24:57 +00:00
Victor Farazdagi
57b74283d3 Attestation aggregation: removes redundant code from max-cover benchmarks (#8044)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-12-04 16:51:31 +00:00
Victor Farazdagi
be078d6a16 Update TestMain(): do not call os.Exit() explicitly (#8046)
* update workspace

* update testmain
2020-12-04 16:10:07 +00:00
Victor Farazdagi
ccba8cfa5a Update rules_go to v0.24.9 and golang to v1.15.6 (#8045) 2020-12-04 15:28:03 +00:00
Victor Farazdagi
afbfaedea4 Unskip fixed init-sync service test (#8042) 2020-12-04 11:57:46 +00:00
terence tsao
3ce96701de Update attestation schedule log with total attester count (#8013)
* Log attesting total

* Use the right library

* Go fmt

* Use fmt

* No space is better

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-12-04 07:02:58 +00:00
simonatsn
d2ba45aad9 Update blst to v0.3.1 and incorporate subgroup changes (#7971)
* Update blst to v0.3.1 and incorporate subgroup changes

* go mod tidy

* gofmt

* Update bzl blst dependency

* Remove unnecessary check for nil

* Run bazel run //:gazelle -- fix

* Update blst to v0.3.2

* fix sha

Co-authored-by: Nishant Das <nishdas93@gmail.com>
Co-authored-by: Victor Farazdagi <simple.square@gmail.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-12-04 06:46:08 +00:00
Raul Jordan
14e1f08208 Add Backup Webhooks to All Prysm Services With DBs (#8025)
* integrate backup webhooks

* pass slasher tests

* fix node

* cmd

* gaz

* read test passes

* radek feedback

* added comment

Co-authored-by: Victor Farazdagi <simple.square@gmail.com>
2020-12-03 22:28:57 +00:00
Victor Farazdagi
647b4cf108 Round robin: half open interval in syncToFinalizedEpoch (#8039) 2020-12-03 21:55:42 +00:00
terence tsao
c090c6a1c5 Revert "Logging with PadLevelText to true" (#8036)
This reverts commit c51754fa8a.

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-12-03 20:38:00 +00:00
Victor Farazdagi
7dd0c24fea Skip bogus init-sync test (#8038) 2020-12-03 20:20:04 +00:00
Victor Farazdagi
e1755b6066 Invert enable-sync-backtracking (#8034)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-12-03 15:43:26 +00:00
Victor Farazdagi
3092f75ec2 Init sync: conditional syncing to finalized slot (#7999)
* extract sync methods

* add unit test

* gazelle

* Update beacon-chain/sync/initial-sync/round_robin.go

Co-authored-by: Shay Zluf <thezluf@gmail.com>

* better set back step

Co-authored-by: Shay Zluf <thezluf@gmail.com>
2020-12-03 14:10:51 +00:00
Shay Zluf
821620c520 Add test for local protection genesis attestation (#7977)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-12-03 01:02:51 +00:00
Victor Farazdagi
5417e8cf31 Init-sync: enable peer scorer by default (#7974)
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-12-03 00:45:20 +00:00
Radosław Kapka
323769bf1a Make TLS connections to a remote wallet non-mandatory (#7953)
* disable-remote-signer-tls flag

* use flag in edit-config

* send requests without TLS

* change warning message

* fix account list output test

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-12-03 00:18:15 +00:00
terence tsao
c51754fa8a Logging with PadLevelText to true (#8003)
* Use logrus formatter with pad

* Validator

* Fmt

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-12-02 23:49:13 +00:00
terence tsao
2153a2d7c3 Remove logging deposit inclusion slot (#8023)
* Remove logging deposit inclusion slot

* Remove old tests

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-12-02 21:17:47 +00:00
Nishant Das
20514cd97f Fix Interop Mode (#7978)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2020-12-02 20:29:36 +00:00
Victor Farazdagi
32f6bfd0a5 update deprecated multiaddr package (#8022) 2020-12-02 11:21:43 -08:00
Victor Farazdagi
c7f7a29d7e remove redundant start slot (#7991)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-12-02 20:47:39 +03:00
Nishant Das
387f7b28c1 Add Buffer For Inbound Peers (#8018)
* allow inbound peers

* comment

* gaz

* gaz

* Update deps.bzl

Co-authored-by: Shay Zluf <thezluf@gmail.com>

* add test

* re-arrange imports

* fix up

* fix tests

* gaz

* Update beacon-chain/p2p/service_test.go

* fmt

Co-authored-by: Victor Farazdagi <simple.square@gmail.com>
Co-authored-by: Shay Zluf <thezluf@gmail.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-12-02 10:45:28 -06:00
terence tsao
cf3181e2de Update README to include branching strategy (#8006)
* Update README.md

* Update README.md

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: Victor Farazdagi <simple.square@gmail.com>
2020-12-02 09:48:52 -06:00
Mohamed Mansour
9d2fe80140 Fix missing space in error message in account list (#8009)
When validating the account list, there was a missing space in the error message. This fixes that nitpick.

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: Victor Farazdagi <simple.square@gmail.com>
2020-12-02 16:02:59 +03:00
Shay Zluf
f9c696ed54 Pending block queue lock fixes (#8002)
* Unlock in case of error

* fix comment

* fix comment

* revert one defer

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Nishant Das <nishdas93@gmail.com>
2020-12-02 11:50:37 +02:00
Raul Jordan
3bd5e58a5c Merge branch 'master' into develop 2020-12-01 08:06:16 -06:00
Raul Jordan
fc7c6776f6 Create Bucket If Not Exists When Fetching Proposal History (#8011)
* create bucket if not exist when fetching proposal history

* adding unit test
2020-12-01 08:00:03 -06:00
Potuz
9a1423d62d Log with field error instead of err (#7998) 2020-12-01 05:38:42 -08:00
Nishant Das
a13de7da11 Remove Exclusion List (#7992)
* remove

* gaz

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-12-01 02:17:29 +00:00
Raul Jordan
01bf97293f Fix Miscellaneous Deep Source Issues (#8007)
* deep source fixes

* sh fixes

* fix import

* test err

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
2020-11-30 19:55:30 -06:00
terence tsao
645931802f Improve genesis event log and load blocks to fork choice log (#7946)
* Update a few logs

* Go fmt

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-12-01 00:06:16 +00:00
terence tsao
ea10784a4a Validator logging: return early if no att included (#7979)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-11-30 23:28:12 +00:00
terence tsao
3af7809964 Save cached state summaries on Stop() (#7988)
* Save cached state summaries on Stop()

* Fixed test and added one more test
2020-11-30 17:08:23 -06:00
terence tsao
b150acffca Recover state summary for sync validation (#7994)
* Recover state summary

* Add test

* Fix tests

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-11-29 20:42:45 -06:00
Victor Farazdagi
54a42ce4a8 more robust processing of invalid head slot (#7990) 2020-11-29 15:03:25 -06:00
Raul Jordan
9d174d5927 Add Support for Prysm Web V1.0.0-beta.1 (#7983)
* new web release

* site data update, add /logs to logs endpoints

* tests fixed

* test

* gaz

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-11-27 20:30:12 +00:00
terence tsao
1b1b36497f Add validator bolt db metric collector (#7982)
* Validator db: add metrics collector

* Use the right library

* Go fmt

* Fix tests
2020-11-27 20:03:44 +00:00
Raul Jordan
04615cb97b Add Validator RPC Endpoint to Retrieve Beacon + Validator Logs Websocket Endpoints (#7981)
* add logs endpoint

* commands to retrieve logs endpoints

* ensure works at runtime

* add auth
2020-11-27 18:28:45 +00:00
terence tsao
b243665d3e Save lowest source and target epochs in post (#7973)
* Replace highest with lowest

* Update validator/db/kv/attestation_history_v2.go

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>

* Update validator/db/kv/attestation_history_v2.go

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>

* Invert equality for saveLowestSourceTargetToDB

* Save lowest epochs at post-signature update

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-11-26 19:58:20 +00:00
pinglamb
654ef1afe5 Added flag for specifying header request limit (#7896)
Co-authored-by: Nishant Das <nishdas93@gmail.com>
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-11-26 18:38:52 +00:00
Preston Van Loon
0dbf5c4e63 Increase public key cache size for Pyrmont (#7967)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-11-26 18:04:18 +00:00
terence tsao
c456dcdce8 Replace highest with lowest for signed epoch DB methods (#7965)
* Replace highest with lowest

* Update validator/db/kv/attestation_history_v2.go

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>

* Update validator/db/kv/attestation_history_v2.go

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>

* Invert equality for saveLowestSourceTargetToDB

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-11-26 17:35:36 +00:00
Raul Jordan
10857223d0 Delete Dead Code in the Validator Client's Database Package (#7970)
* delete deprecated

* mod

* deep source

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-11-26 16:50:55 +00:00
Nishant Das
4ef8a0f99e Update Geth Again (#7968)
* fix again

* tidy up
2020-11-26 09:52:27 -06:00
Raul Jordan
af0977b8d0 Implement Export Block Proposals (#7964)
* export funcs for proposals

* add failing test

* round trip test passes

* progress

* deepsource

* Update validator/slashing-protection/local/standard-protection-format/export.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Update validator/slashing-protection/local/standard-protection-format/import.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* revert

* empty root

* deep source

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2020-11-26 04:09:35 +00:00
terence tsao
528272fc66 Add Getters/Setters in ValidatorDB For Highest Signed Source and Target Epochs (#7961)
* Add getters and setters for source and target epochs

* Use the correct ImportStandardProtectionJSON

* Add tests

* Fix buckets

* Fix source to target

* Update validator/slashing-protection/local/standard-protection-format/import.go

* Fix bytesutil.BytesToUint64BigEndian returning 0 when the input is less than 8 bytes (see the sketch after this entry)

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-11-26 02:39:23 +00:00
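
A sketch of the padding fix named in the last bullet of the entry above. `encoding/binary`'s `BigEndian.Uint64` needs at least 8 bytes, so shorter input must be left-padded; the function body here is an illustrative reconstruction, not necessarily Prysm's exact code:

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// bytesToUint64BigEndian left-pads input shorter than 8 bytes so it decodes
// as the expected value instead of being mishandled as 0.
func bytesToUint64BigEndian(b []byte) uint64 {
	if len(b) < 8 {
		padded := make([]byte, 8)
		copy(padded[8-len(b):], b)
		b = padded
	}
	return binary.BigEndian.Uint64(b)
}

func main() {
	fmt.Println(bytesToUint64BigEndian([]byte{0x01, 0x00})) // 256
}
```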
Raul Jordan
c5c868d0a6 Use Separate Buckets to Store Special Data for Proposal Slashing Protection (#7963)
* ignore keys in the schema

* add test

* use separate buckets for special values where pubkey is the index

* test fix
2020-11-25 23:58:01 +00:00
terence tsao
622ab94465 Move Verified and saved pending attestations to pool to debug (#7928)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-11-25 22:52:57 +00:00
Raul Jordan
ae8a619775 Allow Optional Signing Roots in Proposal History (#7960) 2020-11-25 22:24:07 +00:00
Raul Jordan
36b1eb66d5 Add Attested and Proposed Public Keys DB Methods (#7959)
* add attested and proposed pubkeys methods

* Update validator/db/kv/attestation_history_v2.go

Co-authored-by: terence tsao <terence@prysmaticlabs.com>

Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-11-25 21:37:39 +00:00
Nishant Das
639dcb028c Fixing Validator Config (#7940)
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-11-25 20:40:18 +00:00
Raul Jordan
edb40ddea4 Add ValidatorDB Methods for Highest and Lowest Signed Proposals (#7957)
* add in highest and lowest signed proposal

* begin adding tests

* add in db tests

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-11-25 20:04:43 +00:00
Preston Van Loon
0d2e3d978c Delete VERSION (#7958)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-11-25 19:20:57 +00:00
Preston Van Loon
29c6a0c42c Fix zero genesis check, make processAttestation routine wait for genesis time to be set (#7947)
* Fix zero genesis check, make processAttestation routine wait for genesis time to be set

* Update beacon-chain/blockchain/receive_attestation.go

Co-authored-by: Nishant Das <nishdas93@gmail.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-11-25 18:52:59 +00:00
Nishant Das
aefa3e191a Check Sync Status First (#7895)
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2020-11-25 18:27:27 +00:00
Preston Van Loon
987205afc6 Update security.txt (#7956) 2020-11-25 17:38:35 +00:00
dv8silencer
3ae4b793e6 Improve error log for when database network conflicts with runtime network (#7945)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2020-11-25 05:15:30 +00:00
terence tsao
600427bdb0 More bootnodes (#7948) 2020-11-25 04:25:47 +00:00
terence tsao
c7dd33431f Add lighthouse bootnodes (#7943)
* Add lighthouse bootnodes

* Remove extra line
2020-11-24 18:03:28 +00:00
Shay Zluf
0cf9800b75 Fix locks and fallback to db read if attestation history map is missing a pub key data (#7937)
* minimal change to handle nil attesterHistoryByPubKey

* Revert "Always Update Attesting History If Not Slashable (#7935)"

This reverts commit 3cc2ebc5d5.

* remove unused functions

* move save before propose

* wait before go func

* move wait into the go routine

* handling map mutation

* remove map handling in this case

* log in case it is still not found

* fix log

* fix locks

* Update validator/client/attest_protect.go

* remove code duplication

* remove method extraction

* move metrics to their appropriate place

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-11-24 11:24:23 -06:00
Raul Jordan
3cc2ebc5d5 Always Update Attesting History If Not Slashable (#7935)
* update attesting history always if not slashable

* initialize empty if no history found

* rem duplicate logic
2020-11-24 02:48:20 +00:00
Raul Jordan
2cb814648a Improve Slashing Protection for V1, More Tests and Observability (#7934)
* tests tests and more tests

* tests all passsss

* log for double vote
2020-11-23 19:03:04 -06:00
Raul Jordan
dc897a2007 Optionally Save Wallet Password on Web Onboarding (#7930)
* persist wallet password to wallet dir if onboarded via web

* add flag

* gaz

* add test

* fmt

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-11-23 22:11:42 +00:00
terence tsao
a051e684ae Update log levels (#7931) 2020-11-23 13:16:08 -08:00
Radosław Kapka
64be627a6d Make grpc-headers flag work (#7932) 2020-11-23 20:38:32 +00:00
Nishant Das
6f766ed583 Exit Pending Queue Properly (#7927)
* exit properly

* terence's review

* Update beacon-chain/sync/pending_blocks_queue.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Update beacon-chain/sync/pending_blocks_queue.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* fix tests

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2020-11-23 19:34:40 +00:00
Raul Jordan
b0dfc46603 Fix Up READMEs for Mainnet (#7910)
* fix up readmes

* Update README.md

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-11-23 18:47:55 +00:00
Nishant Das
8c3faaa4c7 Add Back Error/Debug Logs (#7922)
* add back logs

* add back string

* Reformat loggings

Co-authored-by: Terence Tsao <terence@prysmaticlabs.com>
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2020-11-23 18:29:27 +00:00
Nishant Das
4c0db8bca4 Bake In Mainnet Bootnodes (#7925)
* add in bootnodes

* fix teku's bootnode
2020-11-23 17:19:35 +00:00
Raul Jordan
0c5c246ee7 Prysm Web V1 Release (#7921)
* even more cors

* auth fixes for web v1

* ensure web works

* include web ui v1 release

* new site data

* fmt

* test

* tests pass

* gaz

* build fix

* no ssz

* unused type

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2020-11-23 06:29:58 +00:00
dv8silencer
f871e1f3ef Give error message if trying to import into non-imported wallet (#7913)
Co-authored-by: dv8silencer <15720668+dv8silencer@users.noreply.github.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2020-11-23 04:08:07 +00:00
Preston Van Loon
658dd95313 Add teku's bootnode (#7919)
* Add teku's bootnode

* Ignore mainnet config for TODO check
2020-11-23 01:20:55 +00:00
Preston Van Loon
57fe012bc2 P2P: Increase outbound message queue size to 256 (#7916) 2020-11-22 23:54:58 +00:00
Raul Jordan
d62420b940 More Default CORS Rules (#7915)
* even more cors

* terence feedback

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-11-22 23:10:21 +00:00
Preston Van Loon
11bbea2562 Use params network config as default bootstrap nodes and deposit contract (#7904)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-11-22 21:22:44 +00:00
terence tsao
2172cd60a6 Update sync loggings (#7914) 2020-11-22 14:31:55 -06:00
Raul Jordan
2a546cc50b Remove Deprecated Tooling (#7912)
* remove old tools

* tidy and gaz

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-11-22 19:07:02 +00:00
terence tsao
7d0031ee77 Update boot node to not use pyrmont (#7906)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-11-22 18:47:23 +00:00
pinglamb
acf49fb38f Fix Alpine Docker Image (#7883)
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2020-11-22 18:04:13 +00:00
Raul Jordan
26658a9f1f Add More Default CORS Domains to the gRPC Gateway (#7901) 2020-11-22 17:12:24 +00:00
Nishant Das
60d99c83eb fix pending queue (#7899) 2020-11-22 08:42:27 -08:00
Alon Muroch
98557e8f5e highest slashing attestation RPC endpoint (#7647)
* highest slashing attestation RPC endpoint

* slasher mock fix

* Update proto/slashing/slashing.proto

Co-authored-by: Shay Zluf <thezluf@gmail.com>

* comments + small fixes

* PR review ctx comments and fixes

Co-authored-by: Shay Zluf <thezluf@gmail.com>
2020-11-22 08:51:20 +00:00
Preston Van Loon
1ba747b3c9 Change log from ERROR to DEBUG (#7892) 2020-11-21 23:14:19 -08:00
Preston Van Loon
71ec919306 RPC: healthz should return an error when the node is syncing (#7890)
* RPC: healthz should return an error when the node is syncing

* fix test

* fix test

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-11-22 00:29:55 +00:00
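
The gist of the change above, as a hedged sketch (handler shape and names are illustrative; Prysm's actual healthz endpoint sits behind its gRPC gateway): report unhealthy while syncing so orchestrators stop routing traffic to a node that cannot serve yet.

```go
package main

import (
	"fmt"
	"net/http"
)

// syncChecker is whatever reports sync status; a func keeps the sketch small.
type syncChecker func() bool

// healthzHandler returns 503 while the node is syncing, 200 otherwise.
func healthzHandler(isSyncing syncChecker) http.HandlerFunc {
	return func(w http.ResponseWriter, r *http.Request) {
		if isSyncing() {
			http.Error(w, "node is syncing", http.StatusServiceUnavailable)
			return
		}
		fmt.Fprintln(w, "ok")
	}
}

func main() {
	http.Handle("/healthz", healthzHandler(func() bool { return false }))
	// http.ListenAndServe(":8080", nil) // uncomment to actually serve
}
```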
Fabrice Cheng
34a26740ff Change attestation signature error to DEBUG level (#7891) 2020-11-22 00:08:51 +00:00
Nishant Das
7e76b02bb7 Make Follow Distance Lookup Simpler (#7884)
* faster eth1 search

* simplify it much more

* Update beacon-chain/powchain/block_reader.go

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-11-21 22:03:16 +00:00
pinglamb
519b003fc3 Fix creation time of beacon-node, validator and slasher (#7886)
Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2020-11-21 21:37:03 +00:00
Preston Van Loon
9a10462c64 p2p: return error when attempting to connect to a bad peer (#7885)
* return error when attempting to connect to a bad peer

* temporarily skip test
2020-11-21 20:09:07 +00:00
Nishant Das
ac60ff2bc2 Add Test For Earliest Voting Block (#7882) 2020-11-21 12:52:42 +00:00
Ivan Martinez
f8a855d168 Remove outdated code in accounts (#7881)
* Remove outdated test in accounts

* gaz
2020-11-21 11:15:44 +01:00
Preston Van Loon
74c7733abf Fix spec diff with comments. Fixes #7856 (#7872)
Co-authored-by: Nishant Das <nishdas93@gmail.com>
2020-11-21 06:12:47 +00:00
terence tsao
f63e89813d Remove chain not started error (#7879)
* Remove chain not started error

* Add genesis state not created error
2020-11-21 01:28:55 +00:00
terence tsao
c021e2e8bc Remove deprecated feature flags (#7877)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-11-21 00:15:44 +00:00
Preston Van Loon
c3fc40907d Fix potential panic with nil *big.Int (#7874)
* Fix potential panic with nil \*big.Int

* regression test

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-11-20 23:09:02 +00:00
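
For context on the entry above, the bug class being fixed: calling a method on a nil `*big.Int` dereferences it and panics. A hypothetical guard (the zero default here is an assumption, not necessarily what the PR chose):

```go
package main

import (
	"fmt"
	"math/big"
)

// safeUint64 guards against a nil *big.Int, which would otherwise panic
// with a nil pointer dereference when its methods are called.
func safeUint64(v *big.Int) uint64 {
	if v == nil {
		return 0 // assumed default; the real fix may handle this differently
	}
	return v.Uint64()
}

func main() {
	var missing *big.Int
	fmt.Println(safeUint64(missing))        // 0, no panic
	fmt.Println(safeUint64(big.NewInt(42))) // 42
}
```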
Shay Zluf
3fb78ff575 Verify GenesisValidatorRoot Matches the One in DB on Slashing Protection Import (#7864)
* Add GenValRoot dbs

* Test genvalroot

* Fix names

* Add overwrite rejection

* validate metadata genesis validator root

* remove env

* fix database functions

* fix tests

* raul feedback

Co-authored-by: Ivan Martinez <ivanthegreatdev@gmail.com>
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-11-20 22:33:51 +00:00
Raul Jordan
7bd97546f0 Dynamic Reloading of Keys on Any FSNotify Event (#7873)
* dynamic import

* add tests

* spacing
2020-11-20 22:04:59 +00:00
Ivan Martinez
5140ceec68 Hotfix for WaitForChainStart GenesisValidatorsRoot Check (#7870)
* Hotfix for genesis val root

* Add regression test

* Fix error message

* Remove comments

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-11-20 20:53:12 +00:00
terence tsao
97ad5cd5fd Reduce no attestation in pool to warn (#7863)
* Reduce no attestation in pool to warn

* Use NotFound

* Update validator/client/aggregate.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

* Update validator/client/aggregate.go

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2020-11-20 12:17:26 -08:00
Ivan Martinez
4dc65c5787 Save GenesisValidatorsRoot from WaitForChainStart (#7855)
* Add GenValRoot dbs

* Test genvalroot

* Fix names

* Add overwrite rejection

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-11-20 18:06:12 +00:00
Roy
1b012ccfa5 Various Powershell Fixes (#7854)
* Remove incorrect x64 error message when showing usage description

* Add missing escape characters in usage description

The actual environment variable value would be printed without these
escape characters.

* Add missing quotation marks in usage description

* Also test existence of sha and signature files

For multiple reasons the executable could be downloaded but not the
signature files. Later on, the script will error out because these files
are missing.

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-11-20 17:17:38 +00:00
Nishant Das
60cdd69b05 Update Gossipsub Parameters (#7869)
* add param and flag

* change back
2020-11-20 15:36:02 +00:00
Preston Van Loon
90a66df529 Update eth2 specs version badge in README (#7865) 2020-11-20 03:21:11 +00:00
Nishant Das
c4a1fe4d0d Add Basic Support for IP Tracker (#7844)
* add basic support for ip tracker

* clean up

* check for it

* fix

* Update beacon-chain/p2p/peers/status.go

* fix
2020-11-19 12:54:19 +00:00
Nishant Das
8a256de2dd Check Target Root Better (#7837)
* check better

* bring it down

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-11-19 11:13:54 +00:00
Nishant Das
c3451a6ce9 Cache ETH1 Headers When Requesting Logs (#7861)
* perform a quick patch

* perform a quick patch

* fix

* fix up

* Update beacon-chain/powchain/service.go

* start caching from here

* remove

* fix
2020-11-19 10:47:31 +00:00
terence tsao
4b6441f626 Pending block queue caching with TTL (#7816)
* Update pending blocks queue to a TTL one

* Update tests

* Comment

* Gazelle

* Fix fuzz

* More comments

* Fix fuzz import

* Nishant's feedback

* Happy lint

* Return error for len(blks) >= maxBlocksPerSlot

* Ensure proposer time conv

* Don't use gcache's default expiration time; it's 0

* fix TestService_AddPeningBlockToQueueOverMax

* Update beacon-chain/sync/pending_blocks_queue.go

Co-authored-by: Nishant Das <nishdas93@gmail.com>

* Fix time conversion

Co-authored-by: Nishant Das <nishdas93@gmail.com>
2020-11-19 05:15:58 +00:00
Nishant Das
eb7ab16f92 Change Back Metadata Error Check (#7852)
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-11-19 04:17:26 +00:00
Nishant Das
e6ecda5ebe add check and test (#7853)
Co-authored-by: terence tsao <terence@prysmaticlabs.com>
Co-authored-by: Radosław Kapka <rkapka@wp.pl>
2020-11-19 11:53:24 +08:00
Victor Farazdagi
095c4d5dd5 Peer status peer scorer (#7480)
* define and enforce minimum scorer interface

* better decoupling of multiple scorers in service

* removes redundant weight

* adds peer_status scorer

* minor re-arrangement

* rely on scorer in peer status service

* gazelle

* updates rpc_status

* fix build

* better interface verifying

* remove unnecessary locks

* mark todo

* simplify service

* remove redundant references

* avoid passing contexts

* remove unused context

* refactor errors to p2p package

* refactor goodbye codes into p2p

* simplify status api

* remove isbad method from peers

* update scoring service

* introduce validation error

* gazelle

* add score

* restore isbad method

* resolve dep cycle

* gazelle

* peer status scorer: test score calculation

* bad responses scorer: bad peer score

* remove redundant type checks

* pass nil config

* add rounding

* test IsBadPeer

* test bad peers list

* more tests

* check validation error on non-existent peer

* max peer slot -> highest peer slot

* remove redundant comment

* combine

* combine

* introduce var

* fix tests

* remove redundant update

* minor fix

* Nishant's suggestion

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-11-18 15:51:42 +00:00
Nishant Das
59d63087b1 Save Powchain Metadata To Disk On Chainstart (#7850)
* save to disk

* log error
2020-11-18 21:44:06 +08:00
Nishant Das
e1dd532af3 handle correctly (#7851) 2020-11-18 21:12:12 +08:00
Ivan Martinez
cfed4fa1b5 Remove listen for ChainStarted in WaitForChainStart (#7849)
* Remove GenValRoot from ChainStarted and remove ChainStarted from WaitForChainStart

* Fix test and add logs
2020-11-18 05:51:00 +00:00
Victor Farazdagi
7735a083b2 Extract common types from sync (#7843)
* extract common types from sync

* fix tests

* simplify

Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-11-18 04:17:42 +00:00
Ivan Martinez
fec469291e Add GenesisValidatorRoot to ChainStartResponse (#7846)
* Add genesis validator root to ChainStartResponse

* Deps

* Tidy

* Fix tests

* Fix test

* Fix test and add to ChainStartedData
2020-11-17 20:15:48 -06:00
Shay Zluf
acb47f2920 Implement Standard Slashing Protection JSON With Importing Logic (#7675)
* Use new attestation protection

* tests fixes

* fix tests

* fix comment

* fix TestSetTargetData

* fix tests

* empty history handling

* fix another test

* mock domain request

* fix empty handling

* use far future epoch

* use far future epoch

* migrate data

* copy byte array to resolve sigbus error

* init validator protection on pre validation

* Import interchange json

* Import interchange json

* reduce visibility

* use return value

* raul feedback

* rename fixes

* import test

* checkout att v2 changes

* define import method for interchange format in its own package

* rename and made operations atomic

* eip comment

* begin amending test file

* finish happy path for import tests

* attempt the interchange import tests

* fixed tests

* happy and sad paths tested

* good error messages

* fix up comment with proper eip link

* tests for helpers

* helpers

* all tests pass

* proper test comment

* terence feedback

* validate metadata func

* versioning check

* begin handling duplicates

* handle duplicate public keys with potentially different data, first pass

* better handling of duplicate data

* ensure duplicates are taken care of

* comprehensive tests for deduplication of signed blocks

* tests for deduplication

* Update validator/slashing-protection/local/standard-protection-format/helpers_test.go

Co-authored-by: Shay Zluf <thezluf@gmail.com>

* Update validator/slashing-protection/local/standard-protection-format/helpers_test.go

Co-authored-by: Shay Zluf <thezluf@gmail.com>

* tests for maxuint64 and package level comment

* tests passing

* edge cases pass

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-11-17 22:37:43 +00:00
terence tsao
925fba0570 Validate beacon block in pending queue (#7847) 2020-11-17 13:50:51 -08:00
dv8silencer
1a72733c53 Handle duplicate keystores in import path without error (#7842)
* bug fix

* Add regression test

* improve wording

* improve wording

* fix test

* comments, wording

* Comment

* import hex output

* fix test

* remove unnecessary sprintf

* fix test

Co-authored-by: dv8silencer <15720668+dv8silencer@users.noreply.github.com>
2020-11-17 13:50:23 -06:00
Shay Zluf
2976bf7723 Source larger than target (#7839)
* handle source > target better

* prometheus metric for source > target

* handle source > target well in sig bytes

* Update slasher/detection/attestations/spanner_test.go

* Update slasher/detection/attestations/spanner_test.go

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-11-17 17:17:21 +00:00
terence tsao
7c54cfea3f Hardening unaggregated attestation queue check (#7834)
* Add more checks and tests

* Move VerifyLmdFfgConsistency

* Move VerifyFinalizedConsistency

* Move VerifyFinalizedConsistency higher

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
Co-authored-by: Nishant Das <nishdas93@gmail.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-11-17 16:31:43 +00:00
Fabrice Cheng
d3f8599d19 Add indicator for disabled accounts in account list (#7819)
* add indicator for disabled accounts in `account list`

* add also the account name in red for disable accounts

* bold disable as well

* Update validator/accounts/accounts_list.go

Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-11-17 09:58:53 -06:00
Victor Farazdagi
2034c662af Refactor scoring service (#7841)
* refactor scoring service

* fix anti-pattern issue

* add block providers bad peers detection tests

* check status when peer scoring is disabled

* more tests
2020-11-17 23:28:13 +08:00
terence tsao
ad5151f25d Hardening aggregated attestation queue check (#7826) 2020-11-17 07:25:18 +00:00
Raul Jordan
f75a8efc0d Remove Keymanageropts Pattern from Wallets and Remove Enable/Disable Feature for V1 CLI (#7831)
* rem opts

* rem more km opts

* more removal of km opts

* removal of km opts

* definition of internal accounts store

* refactor enable/disable

* enable build

* fix rpc

* remove keymanageropts

* fix imported tests

* table driven tests for enable disable

* table driven tests for disable

* comprehensive tests for disable

* tests complete for enable and disable

* pass enable disable tests

* clarify imported

* fix deadlocks

* imported tests pass

* remove enable disable entrypoints

* better derived text

* deep source suggestions

* gaz

* tidy

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
Co-authored-by: prylabs-bulldozer[bot] <58059840+prylabs-bulldozer[bot]@users.noreply.github.com>
2020-11-17 06:00:20 +00:00
Nishant Das
39817c0586 Add Back Flag to Subscribe to All Subnets (#7836) 2020-11-17 05:25:35 +00:00
Nishant Das
168cffb0dd Check Sub Group for Herumi and Fix Edge Cases (#7823)
* check for herumi

* clean up

* fix tests

* fix
2020-11-17 04:12:23 +00:00
yorickdowne
194ee7c439 Add --mainnet no-op to validator sub-commands (#7833)
Co-authored-by: Raul Jordan <raul@prysmaticlabs.com>
2020-11-17 03:17:08 +00:00
Ivan Martinez
5889670cc7 Remove WaitForSynced (#7835)
* Remove waitforsynced

* Remove WaitForsynced entirely

* Fix bazel

* tidy
2020-11-16 20:48:16 -06:00
Raul Jordan
7449eba612 Refactor HD Wallets for Enhanced Security (#7821)
* begin hd wallet refactor

* further simplify the new derived keymanager

* make it almost a full wrapper around an imported keymanager

* fix up the EIP test

* deprecated derived

* fixing keymanager tests

* fix up derived tests

* refactor initialize keymanager

* simplify hd

* pass some tests

* pass accounts list test

* gaz

* regenerate protos without create account privilege

* enforce account recovery on wallet create

* allow accounts delete to work

* remove mentions of accounts create

* resolve comments and go mod

* fix up tests

* build fixes

* remove insecure warning

* revert

* fix proto file

* remove create account message

* gaz

* remove account create

* update web api protos

* fix up imports

* change func sig

* tidy

Co-authored-by: Preston Van Loon <preston@prysmaticlabs.com>
2020-11-16 22:26:04 +00:00
Preston Van Loon
d85cf028ef Update go-pbs after v1 changes (#7830) 2020-11-16 21:14:04 +00:00
terence tsao
71c6164c42 Remove a few old metrics (#7825) 2020-11-16 18:27:41 +00:00
Nishant Das
83601245f2 update geth (#7824) 2020-11-16 09:29:08 -06:00
james-rms
758ec96d6d beacon-chain: fix segfault (#7822)
Observed this segfault running all tests on master, occurring
in around 2-3 out of 10 test runs.

```
FAIL: //beacon-chain/sync:go_default_test (shard 3 of 4, run 1 of 10) (see /home/j/.cache/bazel/_bazel_j/1ba834ca9d49f27aeb8f0bbb6f28fdf3/execroot/prysm/bazel-out/k8-fastbuild/testlogs/beacon-chain/sync/go_default_test/shard_3_of_4_run_1_of_10/test.log)
INFO: From Testing //beacon-chain/sync:go_default_test (shard 3 of 4, run 1 of 10):
==================== Test output for //beacon-chain/sync:go_default_test (shard 3 of 4, run 1 of 10):
panic: runtime error: invalid memory address or nil pointer dereference
[signal SIGSEGV: segmentation violation code=0x1 addr=0x0 pc=0x138eea6]

goroutine 1660 [running]:
github.com/prysmaticlabs/prysm/shared/abool.(*AtomicBool).IsSet(...)
	shared/abool/abool.go:39
github.com/prysmaticlabs/prysm/beacon-chain/sync.(*Service).subscribeStaticWithSubnets.func1(0xc002dd4400, 0xc002990940, 0x17bca26, 0x1e)
	beacon-chain/sync/subscriber.go:207 +0xe6
created by github.com/prysmaticlabs/prysm/beacon-chain/sync.(*Service).subscribeStaticWithSubnets
	beacon-chain/sync/subscriber.go:200 +0x172
================================================================================
```

TestStaticSubnets was testing a Service with an uninitialized
chainStarted value. This commit initializes chainStarted explicitly
in all tests that construct a Service. This reduces the observed flake
rate to 0/10 runs. This was verified with:

```
./bazel.sh test //beacon-chain/sync:go_default_test --runs_per_test 10
```
2020-11-16 12:10:34 +01:00
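
A minimal reproduction of the failure mode described above, with a simplified stand-in type for shared/abool.AtomicBool: calling a method on a nil pointer to the flag dereferences its field and segfaults, which is why every test constructing a Service must initialize it explicitly.

```go
package main

import "sync/atomic"

// AtomicBool is a simplified stand-in for shared/abool.AtomicBool.
type AtomicBool struct{ v int32 }

func (b *AtomicBool) IsSet() bool { return atomic.LoadInt32(&b.v) == 1 }

// Service mirrors the sync service's shape: the flag is a pointer field.
type Service struct{ chainStarted *AtomicBool }

func main() {
	s := &Service{} // chainStarted left nil, as in the flaky tests
	// s.chainStarted.IsSet() would panic here: invalid memory address or
	// nil pointer dereference. The fix initializes the flag explicitly:
	s.chainStarted = &AtomicBool{}
	_ = s.chainStarted.IsSet() // now safe
}
```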
terence tsao
977e539fe9 Loadblock returns err on invalid range (#7811)
* Return error on invalid range and fix tests

* Uncomment some test codes

* Update comment

* Sync with master, fixed more tests

* Rm error condition, update comments, tests
2020-11-16 01:06:13 +00:00
376 changed files with 13098 additions and 11514 deletions

.well-known/security.pub (new file, 218 lines added)

@@ -0,0 +1,218 @@
-----BEGIN PGP PUBLIC KEY BLOCK-----
mQINBFpZUO0BEAC/tqN6QctJKSOF+A7jrBzvNvs6rVEi9Hn55u/scKNIPkSRJRTy
8kVsDMha+EKXka8oQiVM+sHoHMIMCa3tdicm1/k5f67X8dqadQmFP4DhYRYIKCKT
TkntNG568ynf/yUs/YY9Ce8H17JvgytkLK50mxMxycUlYREMaRPMR8Wt1Arrd9QT
U0I2cZemeqUORKuiVuZjj/7BVDHvSXHvzpi5zKI86sJQTnzcuGqyNsrUU4n4sYrb
I+0TfEHzmSdxoAXSaMYjomPLmbaSdBiK/CeNKF8xuFCKJRd8wOUe5FcIN3DCdk0e
yoFye5RMC+W09Ro+tK/jTQ/sTUuNLJm0VUlddQQeoGlhQdLLwiU4PJqcyeL4KaN1
l8cVml7xr1CdemhGV4wmEqAgxnNFN5mKnS2KcDaHxRz7MoGNdgVVQuxxaE0+OsdJ
zCKtA12Q/OcR24qYVFg5+O+bUNhy23mqIxx0HiQ0DsK+IDvcLLWqta0aP9wd9wxG
eObh9WkCxELTLg8xlbe0d7R3juaRjBLdD5d3UyjqGh+7zUflMsFhpUPraNXdKzvm
AqT25cveadM7q/CNzFXeCwmKjZab8Jic8VB80KcikmX6y9eGOHwjFixBogxowlBq
3KpeNTqJMNHYBzEb0V18P8huKVO0SMfg11Z1/nU4NA4lcjiVImb5xnJS0wARAQAB
tCxQcmVzdG9uIFZhbiBMb29uIDxwcmVzdG9uQHByeXNtYXRpY2xhYnMuY29tPokC
NwQTAQgAIQUCWuZ7uwIbAwULCQgHAgYVCAkKCwIEFgIDAQIeAQIXgAAKCRBy4z5N
8aUDbtchD/4hQDTj7qsccQKpaL8furw+OWuoZtX38smO9LrcSoLaZlk5NRxFNlhQ
FaL6iyDLoDkWUXEPg94x1dMFBUfMdIuh6iEF2pcvCN6WG3Pd2j2kG6OxaIx+rI7a
eaD2Wds312etYATu+7AI6pxlQVqPeZi6kZVvXo8r9qzEnl1nifo7d3L4ut6FerDz
/ptvIuhW++BEgksILkwMA44U7owTllkZ5wSbXRJer0bI/jLPLTaRETVMgWmF5L2n
PWKhBnhBXf/P00zTHVoba0peJ/RGdzlb1JZH+uCQk0wGBl0rBMUmiihIB8DZBZDX
5prwMdoHG9qAT9SuJ8ZOjPKaBHVVVw4JOU6rX2+p9E49CVATsdoPfWVbNyVRGi5f
Oo0bJPZU3uO10Q09CIeyqT6JPDCZYS7po2bpfFjQTDkoIv0uPWOsV5l3cvFxVlcD
Pvir4669xhujmoVBOrp18mn/W/rMft2TJ84inp0seKSKdwBUeEyIZwwu1YTorFe4
bgJghDu/Y+K4y0wq7rpPimoiXSoJOaaIPrOzsKAVu20zJB4eoulXlPwHexix8wwf
xeVH4bGl3wtTFWKja0+EQVdxpt+uUlABvrheQbNlNGz5ROOAtjvwASI3YW/jVVaG
wSbOUuMEHfC1rSnj5Y9fN6FbilxJAZ2UbvZE6iXqqemfRdFuB5yedbQmUHJlc3Rv
biBWYW4gTG9vbiA8cHJlc3RvbjkwQGdtYWlsLmNvbT6JAjcEEwEIACEFAlqrvbMC
GwMFCwkIBwIGFQgJCgsCBBYCAwECHgECF4AACgkQcuM+TfGlA269zg/9FynobfLE
Vh+Aid7l5C6pprHflHpdNVhdHzjvosLKfPMCcBJLyCLAIueKC0vvKbt5kyNV6Adl
6fibdO/vRrNsEdmKXTdfDnN03yVX+VO54rqyuweR09CbLHakECh5awWpk0x9E/Gc
3MlU8YDCVNPcWAw8JPZZ0y6s50/MFFXya+/opw45neH6WXrbqbIapzZ4V1bhkloF
TTz7LYp1dk6qjus/f1jHgujclJ6FazG+OqCpzJ22lnuwNYs8sv1DclW+lEUfIin5
PvzcSRCCd6NilFRSGzfBhM7wxZrAp0JzWXpM1jmd2WHtgErusTaTXRTATjPf9Chg
SE9UT3EJvJ5fPxuxm+qOAowpJwe8Irv+YuMzL8C6C2BhZIV8XID3/cErNeQbPocj
QFksmBEwpe9EG3Yhd5SmSXRhCtBFnyFKyOqPpAE2s+U0b7qsnWc50UFbIytPEm4C
YmdNL6IVilp/+LYFcRWWK/ppOtvtgbj8r7+Foidi/tCauJGt2BzhWH7DkLEdXGxk
PfR/rTgyuAQZowl03DaNwAPKegY2SIuROO9kpQACwTWZNg4l2yrZv09qrKBhohyn
b2i4pwHPZ5/bWLikdzENcJuvbH5qhf6tV0FIaNGbTkWX00uI++bvWAxxE25jZv56
e7VTaWuMt8Ly4DBcwl5JyWuvZ74+yv7QGp20WlByZXN0b24gVmFuIExvb24gKDB4
ZjcxRTlDNzY2Q2RmMTY5ZURGYkUyNzQ5NDkwOTQzQzFEQzZiOEE1NSkgPHByZXN0
b25AbWFjaGluZXBvd2VyZWQuY29tPokCNwQTAQgAIQUCWllQ7QIbAwULCQgHAgYV
CAkKCwIEFgIDAQIeAQIXgAAKCRBy4z5N8aUDbmvdD/4jkT1IXJyj65sgHus0HHYC
MBp6mzvtpDcPljg/5Nl1WXydv4zYGnEnIwWIqDYwwvokOKllxAjgevcplHqZa0cb
7XKCFk5h+56FLPHv9e0WK1fOFeo2ad3imNRstSHgaumGAWHJMg9vWbDisGv1zjQ0
usFkrMKoX34YzMt8KBD7IuQyeBkYNupT8EfByeA9Ta+wkvS+X6BojsFB1UWDAFCY
z8RgTcWIFjWtGZwIkWUKCzebqXbElpJF8ckZU9q4qVhyZ2DT+8BS/Lm0Sf4z6VTC
4EN10ZuN+F+J3DMR2Zuudp4X5OUGkDG4KuU/kvj6EJRWpbTS1D9ReK7hoApIbV72
Um6Mf7kC9EKvxgL1gOfl4aj3Io9M0R8FT/0WSMduQzf/jI3XqdNH0nYo2kTDdZm8
S972cyvt6frKYy6RsOlPz1+S61iCmupsRY+HyKDTa0vnpXg2c4dE1neF5eMI6SVw
viJvCG2cflctJ2PLiINZJYAg87gV5Rm1i/Lt2YG5zdxAXXJt2R0uuU9fv4DcHZLU
yx69Yuh+5UiEaXYU7xoRCdZJYyHbvaC2hPcDGnEa3K1QbgnI5hGAtG3cwdbYC5e3
bcaJb/LdwzkdRnHLgpxa/UTAIfejEI1U2kvVvNoe/HvEXq/OPrhFDvE4rW8AzbX+
ISTWWRY0lLSr8/SD0TDJMbkBDQRam2geAQgA0kESOibOO3CcXrHtqP9lPGmj6rVe
G18fRmPDJiWtx863QqJeByuuUKwrGkPW/sqtIa5Ano+iXVHpk7m955nRjBmz4gd8
xqSGZd9XpNObYPA5iirAO8ztpgQvuvsHH9y9Ge50NnR7hQSMUbGVeCUU/vljwT60
/U+UPnsTAmkhvkEI72x50l5Ih9ihcBcr5jJpi08XktE3sFOoannac0kZQJ6WXFrY
4ILbv8fVqcRf44xMKOlFB9qHhleGW0H9ZUjTKs9quRt7r5D1MOiwrZDjNN3LqMco
oWj37hb+3KkmIIsAYB2DjnWYkMPu2B0O4uSOHYAWfEJtRvA8qW7sWj+q1wARAQAB
iQIlBBgBCAAPBQJam2geAhsgBQkB4TOAAAoJEHLjPk3xpQNulz0P/2m9veAAGGCO
yPsqiNVVa8lrhmLGh/W+BaoszQ/r+pfin4xTOV5K5h3MC5CVJM0JxP/cxNol+Nmr
cYGbZgq4QhLlH6PdQ7cReL5ED6Wi+eHb4ocvXJqUuZ2Gl8Z7gzQzp+WFbLrn8vXj
LcyGGEETV84jy+X5cKu24eAjTFK+unfqwfxXFZP5vhIEVe7+uG6A/pMB5eLDqEUh
FQUcWqCF2Imt08Kcn7HL31GoIY0ABHdD+ICXZE5lTm6iJGzpFBKdTMm/e5igCJ3v
/tgtWZbnvyseLR/T/gU1JyheS9kNXP5sICwicBbY/vnGdEP6OKpDOSfQam5x4BBj
cvsBnsNuonY7OJn4iLY6LviQ0MM91PbaJUUJrp9Uyi4hj9iO/MZBaG0Giu0FKjG6
Vk+RlCYPjYIvHflQLZHe9BWLPN1AvaLKszt3IYaxS5seXCu0ZqHDGCBVqVCNDDJk
MJbHlrOVZ9T6mZhA+aQ1EJvTWk3MNj1AOyCJdiXtOOdg+4Fm5dN7nLpumLIg2yP2
afI7YfrPGA7sm+T0IMnOWxkK+KODC7OV9h/QjyBJDcUYGXLapuK9eP80cr8hKvH7
S3G4Top/rXDpnBNQ2azCqmUwaJWNRhLucC80Vd00d4pWIOwAPWpiV70Fq8OhQFhT
PNXwFXVLwtNfPvPxN1s+Vk+BBXp+M19AuQENBFqbaC8BCADSq89Z9xWRS2fvWI/N
+yEWliIU8XiqC9Ch+/6mS2LEyzB1pPLIIQcRvM6rq2dxXIRveVGpb63ck9vUtuJG
//es+DnDkO7re+ZmWHX+LAqMYNdaobSYxHkkR4CcY2HbPSEUbb//Zwk4BDyp3g3W
bKK9noVbslZuSwWNrxjX/Hieh/dIGYkNFeWOlmNfUYsevzqNDjsziOagyXKxLc++
hUM3GKgzXRQJBvBpgzQc4bRY+YGHXtZurk9AiZ4ZBhWv2Qrb5OYYislE9sdv3KWV
Iv1EBpbkAJ9MM1HgS8bkIOIpNs4XxHY6fTWWdrXi+NgZXQwQRYaWTQsXL3mktarS
fFfTABEBAAGJAiUEGAEIAA8FAlqbaC8CGwwFCQHhM4AACgkQcuM+TfGlA24vqg/8
CsVBHO4mh8SSaxdWNEU+mG4pU230BRCwrfn42wuSKb7WNLTTcWMDNI0KY/JNcWSq
G2qa6lngiAyOS72Jd635ptZ6Wvjd0WtBt90NN2jtn+aRqwQ8uItlYMQscofFzskj
QnBF+NWER+44nxboghuQ041m6aI2XmYUErSOBZi6onbC3svH6coMldkkiueWFbdB
qac3UXue4LUcaGR5T9pCQsLgTl3D8V5icEM+HpTVVGQZkVrOuKMKe6T9N5FS/WFu
T6CuFf/OyU15a6RE4WW9QqKYsaHl8B6+0P7uqPoVIxs8hfJcwaUu9XwIiZYBZg7N
yYCQ7JBapC5KZlIcTCfFSxby8lCJoZbIM3Pk/pgCuKxGaS9rHHUGuIvz8TfnM9FO
2zlxl4E6k3NFgGKF3p6oiKayC74o6EOw50p6DMjrisG7kkWVsTRCMINF7CcfeVme
MI9ljGAMB1hPtHPhl27hMRfq+/iIbR9gb7+Xw2yKQL2QRjMDIGGxP3q4NjD4tV8P
VyTbAAwNARG8oMpNM628v3tsW+WYNot1SPUQuZbIx1pCwMeTqljWsdQxWRVW5UxM
dnXTCqZhQwH0ICG/jbepbP6ciVB/CSa7TVohEK6jiTMorhqynRMMJ6p48Z6HIlyY
0Ss8q9K29eXaqmLB8Oy3HmupqxH95TqMntqivzf6i6e5AQ0EWptoPQEIAL1OdDIl
7E3VKAEWH5GnMzdnl9bju/ovoMjLqGevHS9Gyz4OPyZvCQ2pS8k0sgqwsn4F8vWM
7L3xKTyQTSYOPby3do58+kxUrdTNlqGKEEcZDG+MSzxKyft7m+37fzbg6tcU+O3L
/7m8nYWQSRKJeist7Q8HrQJzehuzcgAnNmpeBqXHnAwRBvtqORvz5cQAIQ4EsEvP
f/unTjw95JtL1LtBOaOaynF9ap/TZ34OvDdARmZSdqPpRtEvjfgIboIYYt1dNmDH
kiSaaKaqBLCJTD2z5KT8ccDeF8lzYHAYzNy8v2lTc9vagCZH+lf3d2d6umNcr4y1
NGEN4ZEhrmt/lP0AEQEAAYkDRAQYAQgADwUCWptoPQIbAgUJAeEzgAEpCRBy4z5N
8aUDbsBdIAQZAQgABgUCWptoPQAKCRD619WmVzqkJDTvB/49MQNQk8YJTwAnbdSH
7stU2uQFBbkljE8Juz5WJK53lL6xUVUp/3gKrQio1F+z9uRVqRh0cQnqX6mPMe5a
2dlHEIDHTJjSlR5GCCBRDbssV6SN72xSVgbxVGZ9L32qUYtiwGnxwXQC9D9KsonD
YfGfUhD1CLAldr6HwhJkOq4QKz5GF4ty8sMKEcpM5UaR2sa2y6Ebn9Vzma10YaVp
X7RlZM/iTiDhTmWuxLh7e21DI95k/eFqHpKA912N0n1BdzZPbwk83nVRxXg793NU
RFpegzlLawtaCW9oVJOYqErMGOYN5nbXAHXsr5X7Or70v1Yo1khgzNOfnirO57T9
VjT0J1QP/j1xobgMuNda7Fpwc0xo2oIKEf+SWY9GQrkUK7wCLDbpgbGVxTmREkyE
6oyDzW1QpQXRjLS9Wvtun0J3Wn6r6Aar0VaGKa7uiiq8UORWtFkWQAzzyBj87Rjx
eWZWV1dzLK7eMJdyN0gsOzcejrsOqf1sydzvhm4K66byjDZ78piv0DdyPIb4OsiQ
QU2GH+QwGRYIkYlU9f9g+hSasAfzvrATHlJZNrOjOCgXbut/yP9ug3DHKj76wmoU
n/Y4rpmskAzIQrZIpimkpNBmZVTGr4bkWcokVzrRFor3NCpl1qA1K9Cd43wARH8t
Zwk4evI4abbqUId0vVNCKSSDyCCjgNwRsmU8RXdn7r0vs4ObKuWfY9Yl2y8tq3qO
YPHr0r50YXWtKqUNy5JUc3Ow9DFR1p4O4yfmiSyTLUyOYbglfvtnO32OJkrrZ8Kq
iSDnyiq9u2nYJAEHk7AchF5TJrTCnd8yWNIjohild+wc+rMKktspoEcxmT6zaK4T
AymmEe3jzQhlxptpBm68t2E5kaYFQh+NnizXlRcmFjBgEhH2CxCoerUpK82Y+MuE
S/ODmF9kULre14aXAAspj4vldQ/LJm0SYEfTkE1sDipJQUzv6oS5AFloQDXZPWTB
15P+Xsj8sJV9hWCfJZVmppWuPg/pCYXwdjUHUYouTz3ZL6qnUbm2uQINBFpZUO0B
EADZkfsbXPpDcMALUgVmB3cJU90tFqc8Q5guK9oSs3ibx4SmyhBVmeF2TF6PQNoK
YvpUR50hAcx2AbwtY51u0XxrAr8kFiL6R5ce/0pVMfXGchcC58CJ3uf+O+OPt080
ftyVSTsY9xEnBohMoJescn0L/IPaM6KkkIFIMAI4Ct3QCHox7WHgPLrqB7Efx2Dj
Qkgjbioqj8zVKDwxTs0S3sknch675gOhsCkaI7KMcRGACDinKRF3wQha4brNa8En
vPTV01Cv0ttCo6NCcbQzQi8QQYpMQcWVkquEJWweZQvL/OvYWdT13JgnIp66pkmo
+JvGTOqQpSnIx6OQnV9yqwqsg4E0dkCE+9rDYAHpwvmRkaI2sItjN4KAEQdTWES8
RgGVgfCtvNH87PqpaPIgarMDY/j5KqTDp/7Nc5oGLCmhwZiYzQa7Bm5uVNnYIyOO
d1IjfclgVdVAzMOmrFytvXFCBcga1khL15taC7s6Vuld89TgMZdSot2RVz8W8Xc+
39ZrBvzrCeYsPq5/U0Z85cnOSw4skwh06wsxTvL1D70SilI2c0YdR1sVgbhq04HN
7FyE7SDQ1GqxyTJAU+OPH3Pk97Bl25vWD43RCCIjSUHhKzQRPno2ItObFepE+QJH
lSO1YMXmZDAfsRts3dca3VSDOdAQely6G7HQu5kXWXGtRQARAQABiQIfBBgBCAAJ
BQJaWVDtAhsMAAoJEHLjPk3xpQNuRlsQAKnkyjjXRvfMB3eKZ1PrWf7DBx5WL8Hk
r2QAnv0diDeRTzotQyzKivf3/W3gQc9kQi/ilaPuuXeW+yKiNQaIbai7EC0DdyHa
qG9s8F7UDoojFOGCc3OVpQvuucVbv4a6EpqXBI6ayrQW0jMXakiuIF/dL5uCGRLn
sPMB87xJfMrkqEziymJMooQTRckgj2S9J2QYDZFxmKChPl1kXMlmx6Y4FhzrsYwo
I47hW9hHG1+8129FOgwYTwELEuX6FKShcKpyy77b4Tar3UvdzNkfaysFPvX3O7Oh
Bes2VgfslEZSgK2CScigvLIz9Ehe9CUw6WEC6LZW3bbC+Cbz624gOsm/GYI9Llsc
5/NMMwQTCoTRm+b0UAYQvzHDS7km9yceNSfFlkpE/lWnIt9E0H+8bOwEbYF8y2qy
yLXIm7MYEm4UnUZ0j8kihP+wSHsZP2xPhjEUcQw1ZWhWLACvzlEC0a3zsovoJ6U8
zrqqfEBtuQgfwwJRWArMLQn/rlEJSrTrFfehwhvH3dPVSU36N5nBA8ls5YlSHBQv
38dChpBXu3wzFhetkSuHxdlfopeZMtDmljnIkBNTEFg01oqJNPbyiX2JQcfKizCt
maCIiJY+hOYIKkJdkt1FyOhADBciebxCQrpIIzWupeyQNuVj3I4g6YaQX00+kgiB
H1XKrAg/dpMNmQENBFrHiEoBCADASg9zHYzaSU0/1q1hcmTHU6H4syCQXHHd3zF7
n/RcsGnt4RBuUi/BUvNp3zYR6uFOyyk6LPV1hq2Ca8e/oSFrDYxqLladESQd9GNN
stDeK3HinsWJCVwSbkzNJbUtyr6SclmWt66vNqBZngMallJRQe8QDqpg0ZSZj/0d
VGxPBR16zc/2ddGnXJFe/V5XAWAap9SEo44pyGK4xf87Bgq8jT33LuQtd8exOk3E
atkK5jLEn9xmiheoSePEhOoQSrJMHfMjFka0PYZlCeaaHw7r7yXb/VoHFOAPxb6k
a1cunbp39b4z7Jy9kLBy0tbDnAs/sLp4LUN3Vx1JLoXBSIsHABEBAAG0JFJhdWwg
Sm9yZGFuIDxyYXVsQHByeXNtYXRpY2xhYnMuY29tPokBNwQTAQoAIQUCWseISgIb
AwULCQgHAwUVCgkICwUWAgMBAAIeAQIXgAAKCRCVRSpwGBD+26lmB/wJxChWwva0
StjMRSk3V7JTBPi5RNQm3VY8UydUNjsXzHVvtjUczOj+zHfo8tYUYa/ypWujXEX9
bVYVMr9JGNjm+rysI1gmW1gcx9pZr9gde4CR4Owfpwjh8oFnSrBBrcaelb0Krrv4
Okms43fEw1bWpllayal5SqknOIxpw1ZiNpG3jVe/C9n1/3Nw4XF5R0RHDxXTu6Hu
H3t7zRQwAJcOod6ccRdzNR9CsGdnOoNPG3pqs9w6zWUp0QETMAMcEpSLCJumaVfQ
24PK+MF92F5JRwNVt80xSLA9KMyNnv/ZvxZXXLjkIhsAmwYkHUs3WSm5HJ4qqOCX
V8VwFbwnea3eiQIzBBABCgAdFiEECuAFHWR7o8GpF69AcuM+TfGlA24FAl9zs2AA
CgkQcuM+TfGlA24dMA/+N31SPAXVgwiNasBknb+YSq2/OQPogxQUc8tupvDcQ62H
u0kvA7pUPUFjITJ1xsm8CRXfb7Hge9rNUw9Y2MNKf4sIDQs3bFeKOiAGVbO8Z055
5VCTWWPhXzjWoR84YWrx0Go8WT/V3Lahy4frGA3Vsza6wzi2P6c7LF4jqX2iBD1l
OpAYNGyt0sX/RLp3s2jOTWJwVyRR4UKSZOgpi9OTGLXrq5oU2dpwEIzBmhaWIs+u
oD5/4TaAt4lEFu3Sxk5w8fJlUXbM4IG8A1l+dnRPF5rtjtfvuX0GeQcDtJ5e1qHj
7PvVj8HjGPwxqsAjYHpg6QjyQCdtHYHdboEIU+OXMYRPRh2Iv0GUoeuMqoSsvFgn
c9PGN8Ai5u+oNzKeLJhpxpLV7s9zAbsripdnvDBn7RwuNx2ziqZayxoYvFRCAQl5
wzr0/eo/wq36D9EI7uJ2I3yt1g/VkwWQsAeE2skuGGbwed263cWTkl6g1w+vlDY3
/jc3a8HcS0zIUX194ChrbbNezGb74mQFoLk6PksLfhXseAKCs8bvPaTwIm0JGTMT
YzkRufrv1+I9KPFy3fTpvnMZTbud4nPCLsK179whk+Jrdv866i+E2WZ1JyzIZ9bB
P1x+ABi82PMdzhTw+pTkR1CVHmrmOiSi2MHRYGMedM9ECGnPIdab5d75r1qkuvO5
AQ0EWseISgEIAKHrgTVeZ0CWZMETk7NFjDTvBpoTMQDT7DDe2vFQHc8D2AvR3Aod
+CUaOeY0Yp0GOemd7BEcUbpYbCQk1B9G8vHNcQqTMLjBgujXwF6w2c+lqEu2/tyI
2/uGCOBDs4wuQZo+akCCMekqYjdZBfZV6nQzf/l7eQHIq+sNtnSiZmHdH46PLi7/
17wR9GxzKvuv1R73DCyekCcz1Puo0b7lfGD28kESJK1Mg9SAOqVjtaS58Oxo+Y1M
ZWRqh0tkAkgOBpdyddGy6TX/9c3a0U3eBQweRpNDh0eCEh5UsYDluL4NtXj7rVYd
4mHONJzI3h1LnKWvpVYmk703MPmtgeJwNzEAEQEAAYkBHwQYAQoACQUCWseISgIb
DAAKCRCVRSpwGBD+25PxCACNBj/2HBSpdYAxEGhNHSaw62y7Jwuf7NbsKIAqygzi
m2+dxae7PbAm64jEXYJA5GaCOs4xO52S/2+1N87e5J86VRNg0vlA9/ivFzERAxEg
rgIUyGXYfS8oA/4r5+PfKt/NvXO2wH3MPakrqZqXhOv+1IPvOt03wWoZKmYyVT6g
YnzutOsvH6cbhUh/D0WpuU8ZgkrFu5Xe7ynZoLm1qQOA23pkxxQbeNs0uoKUja9v
bx4eBtaliLc6rsXt+1WzdXA5ZRNqkdn87Tz5IU0FuQYVblYvPz9QoUlvx5siWVaP
Mmc7dIctWaWyaJmgjkLKdy36ydS5axhqn158yIjOZV3emQINBF8h6lMBEACovC4z
oieJ9iMZpWfylsLQKkeEmfSXnjcjW4RLUjs2CRWj+W6H7eCRy/MBOdx+6zAb8TE/
n/TSlP5l5Xx40BGDAMUZVFrJVEkMPK5kUmNzybg7PiuMd3qZE+pNyHEXXlLU77Dn
VO26TD9RvpKXdjm+ATsnQ6rvDNszkYI2Bj1tPxwZ7bRi96U/upL3WfYOsTLHirM3
pEkI3zfMZj8ufDX9XlmGrQ384E/hTgjpLXmDm/jMRlX3mRzDkV1HO+gEicfePm5+
K4eWlMCNXN5bcGwoEFY2LwAojRcbRaH/UH2S3btkG2eSK2fOFEwQ0G4vIyoLF60R
cEk1s6cYIgk3kVsmNSzA5iJ81nD5bbfTceUKsjTZiw5RmN0Vh5g75pw+3uoXB+55
egabEIt/GTZZlP6zhd/CKjTQR0R4+ZaEbZFOCtABukC5xfWGCRdNmXAWMBe0MYpW
ub2TRLISLfNNc7GWM4Y17d9aRhaql9gY6QLIRNGYuvGPiRMaJADZx46LtbWo8YiG
xLId7H8D+/0MSzMOg7RhELqYScYDigiVEXkTHA3QSprf/ohjm8woRhQY5CjCEZp5
8MvhD40VAo4Gxgx1V8lwB3CD3kDgFEaUZh2H+N/fmRegACN2lzEm1krOiS7sAB48
/Av99/1VXalLoBbzFTnuAYwnmLk8vdparT5LlwARAQABtChUZXJlbmNlIFRzYW8g
PHRlcmVuY2VAcHJ5c21hdGljbGFicy5jb20+iQJOBBMBCAA4FiEEMX1ukQWPjzwj
A7p3VjE+RFgSl6YFAl8h6lMCGwMFCwkIBwIGFQoJCAsCBBYCAwECHgECF4AACgkQ
VjE+RFgSl6YJgBAAqIAMzMFf/+qE896kuXYSlmaYtzhrMXgQVD09UgSq4JfVYudE
e8EfamWz8RFV7znxHO/5Fp4yoWeDGc2b40DhivvRb1p4XSJi+F64moeHt+qn4Hay
CDs5wGv40KJk0+2r7iILc3Gw4NP6r4c2HU2EqSW8fn/bYM3yxLsYEXt5zhgipKpR
CB4WmqojW+CxXkj8dv4CyY+HlKJ6nZWstK42h736njM44rjMDSDt3lv0ZYykopTw
09THnHkWOXY4PSrKOzE+OqsPMPOD3Gq3bYPqQ0ZgdS4FeuDesqqHjJabRX+7DgJK
lLEnylfDlvDKf6I/tqrAfqCCdQvEjpdQXG84GSdK//wT1biunHvGeyHM/PZx9V5O
X8YoecswcYjozU1F7x8buk8GxV7ZuJ2pbdXr7jTou1dA7e4Nas9Qu1RcK39MoTYc
Ub467ONFCX+Y3MzELTnEyx4hSwTxlWg7VjCQ63zrFOd8VG0WUydlqiHgIE8WCJVE
pQlrr2v/A6ieM1HY1Ftjt+mk/qncqibCdyIofyV4o5FrVo8arx74jR+Lhvkp1T9N
uRj8V6M3g9vxJdAsy1O5/ZrdwKJ6Yy1n62+9b/uxhklYONK7DpDlWU0ZLWHlKEIV
zzfILCiLf+nxMQdVwuthlcysg8JwYEE8xDXc851ciuj0ygv3Wm0BY44V6/m5Ag0E
XyHqUwEQALuKzOAlo75p2vSYD7TecE/E0hBOS+cjs8kRH+oIzm5fx7sf00SC1jU2
q5QLYLixNeT+l0bD70R7b8r2uFu1aZL7pQqbGIGisLHlxu8611+PCpE5AsQi3Wui
IZ6Y8K7grJ28vviBiZUBY3iCCRH0LuvyZN3R0zgyMGbzouQk5wuGJUkRHJKtV5by
FVEl3CYudRtAp5LPFw6j7UzT5yQqBmY6tXp5WMmmAvOtnu8ohpRhzY21dJMlSykX
Ne9rcARy8mVWNdcXJUIc85z0BmyrdiA4YY0XiZTHD9mslj+af4QHsaS+p3aorTLD
5BKkp5Ek79a1BUxBjrao4W2fljYf129/SHbwds/Dup26zB2vi/fhbfVPvevXLPpi
Vm4uz8fE4D5lPYZAdu5GtO2V9kWbhDtU1R1SJSdFDI9Sev3B+NLrstclGfdbFQKF
shbUxydjSX56OJvh5hee50PcCz+Ab+usoyUPkcOfET/L55AdqJo3cVYtnAKpOJG/
mKP5Ih3LZVm9wCdWTCzboEtDfLlcjCc5kLiE9Pq7sKMnXm/NcXNFWBkRuaHg+Mt5
Yk659/Q6oUG3yUrS2d7cSeuNRWNlaRlnk3hZtB13xpmoHGYmrMOe7wiEE5xEnOju
1ctRRRX6lNUPlSvID83Y9JSYL9hYMbidRmFY28AIddT/PjVTgfi1ABEBAAGJAjYE
GAEIACAWIQQxfW6RBY+PPCMDundWMT5EWBKXpgUCXyHqUwIbDAAKCRBWMT5EWBKX
poMOD/4/sRLnK94pY2qtSSfNZEumOdQVvges08u34saG4mT1iLD9TcRgsFvgwcTV
Pec9Git4iJmoJpFylvOzoBWmtDzJ7TB3JxvtwUfFH2uZ22k12eChyAFHI8TxASqI
TV2YwkIgB2PTmva1GrdNKM6y5bfS1BdS0u+Etgp4zN9nyQECY2sM2accUi2S7JBV
7o6lEJWCRgFUwIfY6fdfxhZo/w0g2u5wftUQYlbXWSQLImFFZBc/13k8EMxsfbMi
LQ/wxKEfe1ZwFoXqIS6cq8CHTdj70QO7K9ah6krGPmL23LVuxlkQIR7mT5yFJtv2
VRT4IYgOUl6rAd08B6MOy6MOMa14FpNrAybPK8Yd/eSdUNciVf17q33Yc15x+trs
En1hHbDS82xaLJ/Ld4mANeJVt3FodgVmzZGpKheFEOFXXeuX1ZZ4c9TgX+0wBKoh
aBb8EGBVo4aV4GbZ/gEa8wls5NNTwd56H4G94hiq+qFM39x4YjhAvkM0hLRYzR05
tSCdlEbkh2K6RbOhBPsNHCbypWRt2fN8/q4uLPJJt7NRQ2zi6H/x04HGblhXdX6H
mmYjJv1LTbem9VptcpvPauNsibeIvIWA2nYM2ncDWt6gJpOH9Zh4fHuaG4aCdNmJ
hPNeJNnmLcpDQvR9wU5w7e5tC/ZSeTZ5ul1zOKa1qZ4lJ50BDQ==
=a30p
-----END PGP PUBLIC KEY BLOCK-----

.well-known/security.txt

@@ -3,24 +3,23 @@ Hash: SHA512
Contact: mailto:security@prysmaticlabs.com
Encryption: openpgp4fpr:0AE0051D647BA3C1A917AF4072E33E4DF1A5036E
Encryption: openpgp4fpr:341396BAFACC28C5082327F889725027FC8EC0D4
Encryption: openpgp4fpr:FEE44615A19049DF0CA0C2735E2B7E5734DFADCB
Encryption: openpgp4fpr:CD08DE68C60B82D3EE2A3F7D95452A701810FEDB
Encryption: openpgp4fpr:317D6E91058F8F3C2303BA7756313E44581297A6
Preferred-Languages: en
Canonical: https://github.com/prysmaticlabs/prysm/tree/master/.well-known/security.txt
-----BEGIN PGP SIGNATURE-----
iQIzBAEBCgAdFiEECuAFHWR7o8GpF69AcuM+TfGlA24FAl6HrcwACgkQcuM+TfGl
A26voQ/8DFB5wUHP0uyY8k7FGbxhLzSeImxomnUHJaUGfdczbCdYPMEHc9zI1iZP
6LRiy9wS6qhqj/GSKVwvDPr+ZymXuV3L22GOP2lRhl7Z9Mm21ZJNOcoQBFOZnyHu
DAy9HeTmeuJxYkf8weqZYXyzEoKJBDmfuWmEFjrtMcFXUfT3aJn1E2A/AQdcVQIC
9L+iGWwFwjsPhcfaMuwcB7QMheDO6KSB7XPPCbrZ036Np8UTZ4qbZ5y73tlfkcOc
tYTrMSPtS4eNutiDOP5Np36cLzRtNpm/BziAK+7ZKiYY0HI5h9IkCTLO4x2UmAMX
sPoeaAB5z2QLIwmU9J2NhJrwiNMGTpJ+0bowy8U4cgzAX20CXVjRqGhy+cir8Ewg
DjEGjWINUw6W0yzJp0mKSKzuOhdTTmzIYBeMBsyce+pgN1KGFCxeIwxGxyJzADdw
mYQdljRXn4yEYP/KEpu/F2o8L4ptRO2jZWKvTvdzSSGGSyKyF4HsIRJ7m98DaB6S
0oGq1KpbKKTbQi5g8UShGV2gPeMCs5ZIIqK2b/cRzUet18aUuofLmR4lkKZa9yEG
rbzuJq/gB2vgQwExUEgVQ3/DfVc+y80e3YZ5s+rzV0vbLxl4Gh4yExpLo7hRf9iY
EFvMzH+BEEb5VfCwByZyV1BmesZVIosr7K6UmVtPe0bZGvv3uIg=
=5qpD
iQIzBAEBCgAdFiEECuAFHWR7o8GpF69AcuM+TfGlA24FAl++klgACgkQcuM+TfGl
A27rQw/6A29p1W20J0v+h218p8XWLSUpTIGLnZTxw6KqdyVXMzlsQK0YG4G2s2AB
0LKh7Ae/Di5E0U+Z4AjUW5nc5eaCxK36GMscH9Ah0rgJwNYxEJw7/2o8ZqVT/Ip2
+56rFihRqxFZfaCNKFVuZFaL9jKewV9FKYP38ID6/SnTcrOHiu2AoAlyZGmB03p+
iT57SPRHatygeY4xb/gwcfREFWEv+VHGyBTv8A+6ABZDxyurboCFMERHzFICrbmk
8UdHxxlWZDnHAbAUyAwpERC5znx6IHXQJwF8TMtu6XY6a6axT2XBOyJDF9/mZOz+
kdkz6loX5uxaQBGLtTv6Kqf1yUGANOZ16VhHvWwL209LmHmigIVQ+qSM6c79PsW/
vrsqdz3GBsiMC5Fq2vYgnbgzpfE8Atjn0y7E+j4R7IvwOAE/Ro/b++nqnc4YqhME
P/yTcfGftaCrdSNnQCXeoV9JxpFM5Xy8KV3eexvNKbcgA/9DtgxL5i+s5ZJkUT9A
+qJvoRrRyIym32ghkHgtFJKB3PLCdobeoOVRk6EnMo9zKSiSK2rZEJW8Ccbo515D
W9qUOn3GF7lNVuUFAU/YKEdmDp/AVaViZ7vH+8aq0LC0HBkZ8XlzWnWoArS8sMhw
fX0R9g/HMgrwNte/d0mwim5lJ2Plgv60Bh4grJqwZJeWbU0zi1U=
=uW+X
-----END PGP SIGNATURE-----

CONTRIBUTING.md

@@ -4,7 +4,7 @@ Note: The latest and most up to date documenation can be found on our [docs port
Excited by our work and want to get involved in building out our sharding releases? Or maybe you haven't learned as much about the Ethereum protocol but are a savvy developer?
You can explore our [Open Issues](https://github.com/prysmaticlabs/prysm/issues) in-the works for our different releases. Feel free to fork our repo and start creating PRs after assigning yourself to an issue of interest. We are always chatting on [Discord](https://discord.gg/che9auJ) or [Gitter](https://gitter.im/prysmaticlabs/geth-sharding) drop us a line there if you want to get more involved or have any questions on our implementation!
You can explore our [Open Issues](https://github.com/prysmaticlabs/prysm/issues) in-the works for our different releases. Feel free to fork our repo and start creating PRs after assigning yourself to an issue of interest. We are always chatting on [Discord](https://discord.gg/CTYGPUJ) drop us a line there if you want to get more involved or have any questions on our implementation!
## Contribution Steps
@@ -62,12 +62,6 @@ Changes that only affect a single file can be tested with
$ go test <file_you_are_working_on>
```
Changes that affect multiple files can be tested with ...
```
$ golangci-lint run && bazel test //...
```
**10. Stage the file or files that you want to commit.**
```
@@ -181,7 +175,7 @@ We consider two types of contributions to our repo and categorize them as follow
### Part-Time Contributors
Anyone can become a part-time contributor and help out on implementing sharding. The responsibilities of a part-time contributor include:
Anyone can become a part-time contributor and help out on implementing eth2. The responsibilities of a part-time contributor include:
- Engaging in Gitter conversations and asking questions about how to begin contributing to the project
- Opening up GitHub issues to express interest in code to implement
@@ -192,8 +186,6 @@ Anyone can become a part-time contributor and help out on implementing sharding.
- Follow up on open PRs
- Have an estimated timeframe to completion and let the core contributors know if a PR will take longer than expected
We do not expect all part-time contributors to be experts on all the latest sharding documentation, but all contributors should at least be familiar with our sharding [README.md](https://github.com/prysmaticlabs/prysm/blob/master/validator/README.md) and have gone through the required Ethereum readings as posted in our [READINGS.md](https://github.com/prysmaticlabs/prysm/blob/master/docs/READINGS.md) document.
### Core Contributors
Core contributors are remote contractors of Prysmatic Labs, LLC and are considered critical team members of our organization. Core devs have all of the responsibilities of part-time contributors plus the majority of the following:

View File

@@ -1,20 +1,35 @@
# Prysm: An Ethereum 2.0 Client Written in Go
# Prysm: An Ethereum 2.0 Implementation Written in Go
[![Build status](https://badge.buildkite.com/b555891daf3614bae4284dcf365b2340cefc0089839526f096.svg?branch=master)](https://buildkite.com/prysmatic-labs/prysm)
[![Go Report Card](https://goreportcard.com/badge/github.com/prysmaticlabs/prysm)](https://goreportcard.com/report/github.com/prysmaticlabs/prysm)
[![ETH2.0_Spec_Version 0.12.3](https://img.shields.io/badge/ETH2.0%20Spec%20Version-v0.12.3-blue.svg)](https://github.com/ethereum/eth2.0-specs/tree/v0.12.3)
[![Discord](https://user-images.githubusercontent.com/7288322/34471967-1df7808a-efbb-11e7-9088-ed0b04151291.png)](https://discord.gg/KSA7rPr)
[![ETH2.0_Spec_Version 1.0.0](https://img.shields.io/badge/ETH2.0%20Spec%20Version-v1.0.0-blue.svg)](https://github.com/ethereum/eth2.0-specs/tree/v1.0.0)
[![Discord](https://user-images.githubusercontent.com/7288322/34471967-1df7808a-efbb-11e7-9088-ed0b04151291.png)](https://discord.gg/CTYGPUJ)
This is the core repository for Prysm, a [Golang](https://golang.org/) implementation of the Ethereum 2.0 client specifications developed by [Prysmatic Labs](https://prysmaticlabs.com).
This is the core repository for Prysm, a [Golang](https://golang.org/) implementation of the Ethereum 2.0 specification, developed by [Prysmatic Labs](https://prysmaticlabs.com).
### Getting Started
A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the [official documentation portal](https://docs.prylabs.network). If you still have questions, feel free to stop by our [Discord](https://discord.gg/KSA7rPr).
### Come join the testnet!
Participation is now open to the public for our Ethereum 2.0 phase 0 testnet release. Visit [prylabs.net](https://prylabs.net) for more information on the project or to sign up as a validator on the network. You can visualize the nodes in the network on [eth2stats.io](https://eth2stats.io), explore validator rewards/penalties via Bitfly's block explorer: [beaconcha.in](https://beaconcha.in), and follow the latest blocks added to the chain on [beaconscan](https://beaconscan.com).
A detailed set of installation and usage instructions as well as breakdowns of each individual component are available in the [official documentation portal](https://docs.prylabs.network). If you still have questions, feel free to stop by our [Discord](https://discord.gg/CTYGPUJ).
### Staking on Mainnet
To participate in staking, you can join the [official eth2 launchpad](https://launchpad.ethereum.org). The launchpad is the only recommended way to become a validator on mainnet. You can visualize the nodes in the network on [eth2stats.io](https://eth2stats.io), explore validator rewards/penalties via Bitfly's block explorer: [beaconcha.in](https://beaconcha.in), and follow the latest blocks added to the chain on [beaconscan](https://beaconscan.com).
## Contributing
### Branches
Prysm maintains two permanent branches:
* master: This points to the latest stable release. It is ideal for most users.
* develop: This is used for development; it contains the latest PRs. Developers should base their PRs on this branch.
### Guide
Want to get involved? Check out our [Contribution Guide](https://docs.prylabs.network/docs/contribute/contribution-guidelines/) to learn more!
## License
[GNU General Public License v3.0](https://www.gnu.org/licenses/gpl-3.0.en.html)
## Legal Disclaimer
[Terms of Use](/TERMS_OF_SERVICE.md)

View File

@@ -1,43 +0,0 @@
# Testnet
The Prysmatic Labs test network is available for anyone to join. The easiest way to participate is by joining through the website, https://prylabs.net.
## Interop
For developers looking to connect a client other than Prysm to the test network, here is the relevant information for compatibility.
**Spec version** - [v0.8.3](https://github.com/ethereum/eth2.0-specs/tree/v0.8.3)
**ETH 1 Deposit Contract Address** - See https://prylabs.net/contract. This contract is deployed on the [goerli](https://goerli.net/) network.
**Genesis time** - The timestamp of the ETH1 block in which the 64th deposit needed to start ETH2 was included. This is NOT midnight of the next day as required by spec.
### ETH 2 Configuration
Use the [minimal config](https://github.com/ethereum/eth2.0-specs/blob/v0.8.3/configs/minimal.yaml) with the following changes.
| field | value |
|-------|-------|
| MIN_DEPOSIT_AMOUNT | 100 |
| MAX_EFFECTIVE_BALANCE | 3.2 * 1e9 |
| EJECTION_BALANCE | 1.6 * 1e9 |
| EFFECTIVE_BALANCE_INCREMENT | 0.1 * 1e9 |
| ETH1_FOLLOW_DISTANCE | 16 |
| GENESIS_FORK_VERSION | See [latest code](https://github.com/prysmaticlabs/prysm/blob/master/shared/params/config.go#L236) |
These parameters reduce the minimal config to 1/10 of the required ETH.
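For reference, a minimal Go sketch of applying these overrides (assuming Prysm's `params` field names; the same `OverrideBeaconConfig` pattern appears in the spectest diff further below):
```
c := params.MinimalSpecConfig().Copy() // start from the minimal spec config
c.MinDepositAmount = 100
c.MaxEffectiveBalance = 3200000000      // 3.2 * 1e9
c.EjectionBalance = 1600000000          // 1.6 * 1e9
c.EffectiveBalanceIncrement = 100000000 // 0.1 * 1e9
c.Eth1FollowDistance = 16
params.OverrideBeaconConfig(c)
```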
We have a genesis.ssz file available for download [here](https://prysmaticlabs.com/uploads/genesis.ssz)
### Connecting to the network
We have a libp2p bootstrap node available at `/dns4/prylabs.net/tcp/30001/p2p/16Uiu2HAm7Qwe19vz9WzD2Mxn7fXd1vgHHp4iccuyq7TxwRXoAGfc`.
Some of the Prysmatic Labs hosted nodes are behind a libp2p relay, so your libp2p implementation should support the relay protocol.
### Other
Undoubtedly, you will run into bugs. Reach out to us on [Discord](https://discord.gg/KSA7rPr) and be sure to capture issues on GitHub at https://github.com/prysmaticlabs/prysm/issues.
If you have instructions for your client, we would love to attempt this on your behalf. Kindly send the instructions over via GitHub issue, PR, email to team@prysmaticlabs.com, or Discord.

View File

@@ -1 +0,0 @@
0.2.0

View File

@@ -89,10 +89,10 @@ http_archive(
# nogo check fails for certain third_party dependencies.
"//third_party:io_bazel_rules_go.patch",
],
sha256 = "207fad3e6689135c5d8713e5a17ba9d1290238f47b9ba545b63d9303406209c6",
sha256 = "81eff5df9077783b18e93d0c7ff990d8ad7a3b8b3ca5b785e1c483aacdb342d7",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.24.7/rules_go-v0.24.7.tar.gz",
"https://github.com/bazelbuild/rules_go/releases/download/v0.24.7/rules_go-v0.24.7.tar.gz",
"https://mirror.bazel.build/github.com/bazelbuild/rules_go/releases/download/v0.24.9/rules_go-v0.24.9.tar.gz",
"https://github.com/bazelbuild/rules_go/releases/download/v0.24.9/rules_go-v0.24.9.tar.gz",
],
)
@@ -139,9 +139,9 @@ load(
container_pull(
name = "alpine_cc_linux_amd64",
digest = "sha256:3f7f4dfcb6dceac3a902f36609cc232262e49f5656a6dc4bb3da89e35fecc8a5",
digest = "sha256:752aa0c9a88461ffc50c5267bb7497ef03a303e38b2c8f7f2ded9bebe5f1f00e",
registry = "index.docker.io",
repository = "fasibio/alpine-libgcc",
repository = "pinglamb/alpine-glibc",
)
container_pull(
@@ -156,7 +156,7 @@ load("@io_bazel_rules_go//go:deps.bzl", "go_register_toolchains", "go_rules_depe
go_rules_dependencies()
go_register_toolchains(
go_version = "1.15.5",
go_version = "1.15.6",
nogo = "@//:nogo",
)
@@ -352,9 +352,9 @@ filegroup(
visibility = ["//visibility:public"],
)
""",
sha256 = "6bb16ff0dc9348090cc31a9ea453643d32b617e66ac6e7bb38985d530070631b",
sha256 = "117f5366af9cf009354ed1abe02f906168158473461d69c8056984b9b0292619",
urls = [
"https://github.com/prysmaticlabs/prysm-web-ui/releases/download/0.0.2-alpha/prysm-web-ui.tar.gz",
"https://github.com/prysmaticlabs/prysm-web-ui/releases/download/v1.0.0-beta.2/prysm-web-ui.tar.gz",
],
)

View File

@@ -1,7 +1,7 @@
load("@prysm//tools/go:def.bzl", "go_library")
load("@io_bazel_rules_go//go:def.bzl", "go_binary", "go_test")
load("@io_bazel_rules_docker//go:image.bzl", "go_image")
load("@io_bazel_rules_docker//container:container.bzl", "container_bundle")
load("@io_bazel_rules_docker//container:container.bzl", "container_bundle", "container_image")
load("//tools:go_image.bzl", "go_image_alpine", "go_image_debug")
load("@io_bazel_rules_docker//contrib:push-all.bzl", "docker_push")
@@ -45,13 +45,19 @@ go_image(
visibility = ["//visibility:private"],
)
container_image(
name = "image_with_creation_time",
base = "image",
stamp = True,
)
container_bundle(
name = "image_bundle",
images = {
"gcr.io/prysmaticlabs/prysm/beacon-chain:latest": ":image",
"gcr.io/prysmaticlabs/prysm/beacon-chain:{DOCKER_TAG}": ":image",
"index.docker.io/prysmaticlabs/prysm-beacon-chain:latest": ":image",
"index.docker.io/prysmaticlabs/prysm-beacon-chain:{DOCKER_TAG}": ":image",
"gcr.io/prysmaticlabs/prysm/beacon-chain:latest": ":image_with_creation_time",
"gcr.io/prysmaticlabs/prysm/beacon-chain:{DOCKER_TAG}": ":image_with_creation_time",
"index.docker.io/prysmaticlabs/prysm-beacon-chain:latest": ":image_with_creation_time",
"index.docker.io/prysmaticlabs/prysm-beacon-chain:{DOCKER_TAG}": ":image_with_creation_time",
},
tags = ["manual"],
)

View File

@@ -1,10 +1,9 @@
# Prysmatic Labs Beacon Chain Implementation
This is the main project folder for the beacon chain implementation of Ethereum Serenity in Golang by [Prysmatic Labs](https://prysmaticlabs.com). Before you begin, check out our [Contribution Guidelines](https://github.com/prysmaticlabs/prysm/blob/master/CONTRIBUTING.md) and join our active chat room on Discord or Gitter below:
This is the main project folder for the beacon chain implementation of eth2 written in Go by [Prysmatic Labs](https://prysmaticlabs.com).
[![Discord](https://user-images.githubusercontent.com/7288322/34471967-1df7808a-efbb-11e7-9088-ed0b04151291.png)](https://discord.gg/KSA7rPr)
[![Gitter](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/prysmaticlabs/prysm?badge&utm_medium=badge&utm_campaign=pr-badge)
You can also read our main [README](https://github.com/prysmaticlabs/prysm/blob/master/README.md) and join our active chat room on Discord.
Also, read the latest beacon chain [design spec](https://github.com/ethereum/eth2.0-specs/blob/dev/specs/phase0/beacon-chain.md); this design spec serves as a source of truth for the beacon chain implementation we follow at Prysmatic Labs.
Check out the [FAQs](https://notes.ethereum.org/9MMuzWeFTTSg-3Tz_YeiBA?view). Refer to this page on [why](http://email.mg2.substack.com/c/eJwlj9GOhCAMRb9G3jRQQPGBh5mM8xsbhKrsDGIAM9m_X9xN2qZtbpt7rCm4xvSjj5gLOTOmL-809CMbKXFaOKakIl4DZYr2AGyQIGjHOnWH22OiYnoIxmDijaBhhS6fcy7GvjobA9m0mSXOcnZq5GBqLkilXBZhBsus5ZK89VbKkRt-a-BZI6DzZ7iur1lQ953KJ9bemnxgahuQU9XJu6pFPdu8meT8vragzEjpMCwMGLlgLo6h5z1JumQTu4IJd4v15xqMf_8ZLP_Y1bSLdbnrD-PL71i2Kj7DLxaWWF4) we are combining sharding and casper together.
[![Discord](https://user-images.githubusercontent.com/7288322/34471967-1df7808a-efbb-11e7-9088-ed0b04151291.png)](https://discord.gg/CTYGPUJ)
Also, read the official beacon chain [specification](https://github.com/ethereum/eth2.0-specs/blob/master/specs/phase0/beacon-chain.md); this design spec serves as a source of truth for the beacon chain implementation we follow at Prysmatic Labs.

View File

@@ -2,18 +2,14 @@ package blockchain
import (
"io/ioutil"
"os"
"testing"
"github.com/sirupsen/logrus"
)
func TestMain(m *testing.M) {
run := func() int {
logrus.SetLevel(logrus.DebugLevel)
logrus.SetOutput(ioutil.Discard)
logrus.SetLevel(logrus.DebugLevel)
logrus.SetOutput(ioutil.Discard)
return m.Run()
}
os.Exit(run())
m.Run()
}

View File

@@ -4,6 +4,7 @@ import (
"bytes"
"context"
"fmt"
"time"
"github.com/pkg/errors"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
@@ -99,6 +100,14 @@ func (s *Service) processAttestation(subscribedToStateEvents chan struct{}) {
<-stateChannel
stateSub.Unsubscribe()
if s.genesisTime.IsZero() {
log.Warn("ProcessAttestations routine waiting for genesis time")
for s.genesisTime.IsZero() {
time.Sleep(1 * time.Second)
}
log.Warn("Genesis time received, now available to process attestations")
}
st := slotutil.GetSlotTicker(s.genesisTime, params.BeaconConfig().SecondsPerSlot)
for {
select {

View File

@@ -175,7 +175,11 @@ func (s *Service) Start() {
if err != nil {
log.Fatalf("Could not retrieve genesis state: %v", err)
}
go slotutil.CountdownToGenesis(s.ctx, s.genesisTime, uint64(gState.NumValidators()))
gRoot, err := gState.HashTreeRoot(s.ctx)
if err != nil {
log.Fatalf("Could not hash tree root genesis state: %v", err)
}
go slotutil.CountdownToGenesis(s.ctx, s.genesisTime, uint64(gState.NumValidators()), gRoot)
justifiedCheckpoint, err := s.beaconDB.JustifiedCheckpoint(s.ctx)
if err != nil {
@@ -202,12 +206,14 @@ func (s *Service) Start() {
log.Fatalf("Could not get start slot of finalized epoch: %v", err)
}
h := s.headBlock().Block
log.WithFields(logrus.Fields{
"startSlot": ss,
"endSlot": h.Slot,
}).Info("Loading blocks to fork choice store, this may take a while.")
if err := s.fillInForkChoiceMissingBlocks(s.ctx, h, s.finalizedCheckpt, s.justifiedCheckpt); err != nil {
log.Fatalf("Could not fill in fork choice store missing blocks: %v", err)
if h.Slot > ss {
log.WithFields(logrus.Fields{
"startSlot": ss,
"endSlot": h.Slot,
}).Info("Loading blocks to fork choice store, this may take a while.")
if err := s.fillInForkChoiceMissingBlocks(s.ctx, h, s.finalizedCheckpt, s.justifiedCheckpt); err != nil {
log.Fatalf("Could not fill in fork choice store missing blocks: %v", err)
}
}
if err := s.VerifyWeakSubjectivityRoot(s.ctx); err != nil {
@@ -269,7 +275,11 @@ func (s *Service) processChainStartTime(ctx context.Context, genesisTime time.Ti
log.Fatalf("Could not initialize beacon chain: %v", err)
}
// We start a counter to genesis, if needed.
go slotutil.CountdownToGenesis(ctx, genesisTime, uint64(initializedState.NumValidators()))
gRoot, err := initializedState.HashTreeRoot(s.ctx)
if err != nil {
log.Fatalf("Could not hash tree root genesis state: %v", err)
}
go slotutil.CountdownToGenesis(ctx, genesisTime, uint64(initializedState.NumValidators()), gRoot)
// We send out a state initialized event to the rest of the services
// running in the beacon node.
@@ -332,6 +342,11 @@ func (s *Service) Stop() error {
}
}
// Save cached state summaries to the DB before stop.
if err := s.stateGen.SaveStateSummariesToDB(s.ctx); err != nil {
return err
}
// Save initial sync cached blocks to the DB before stop.
return s.beaconDB.SaveBlocks(s.ctx, s.getInitSyncBlocks())
}
@@ -339,6 +354,9 @@ func (s *Service) Stop() error {
// Status always returns nil unless there is an error condition that causes
// this service to be unhealthy.
func (s *Service) Status() error {
if s.genesisRoot == params.BeaconConfig().ZeroHash {
return errors.New("genesis state has not been created")
}
if runtime.NumGoroutine() > s.maxRoutines {
return fmt.Errorf("too many goroutines %d", runtime.NumGoroutine())
}

View File

@@ -446,6 +446,7 @@ func TestServiceStop_SaveCachedBlocks(t *testing.T) {
cancel: cancel,
beaconDB: db,
initSyncBlocks: make(map[[32]byte]*ethpb.SignedBeaconBlock),
stateGen: stategen.New(db, cache.NewStateSummaryCache()),
}
b := testutil.NewBeaconBlock()
r, err := b.Block.HashTreeRoot()
@@ -455,6 +456,25 @@ func TestServiceStop_SaveCachedBlocks(t *testing.T) {
require.Equal(t, true, s.beaconDB.HasBlock(ctx, r))
}
func TestServiceStop_SaveCachedStateSummaries(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
db, _ := testDB.SetupDB(t)
c := cache.NewStateSummaryCache()
s := &Service{
ctx: ctx,
cancel: cancel,
beaconDB: db,
initSyncBlocks: make(map[[32]byte]*ethpb.SignedBeaconBlock),
stateGen: stategen.New(db, c),
}
r := [32]byte{'a'}
ss := &pb.StateSummary{Root: r[:], Slot: 1}
c.Put(r, ss)
require.NoError(t, s.Stop())
require.Equal(t, true, s.beaconDB.HasStateSummary(ctx, r))
}
func BenchmarkHasBlockDB(b *testing.B) {
db, _ := testDB.SetupDB(b)
ctx := context.Background()

View File

@@ -1,17 +1,13 @@
package cache
import (
"os"
"testing"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
)
func TestMain(m *testing.M) {
run := func() int {
resetCfg := featureconfig.InitWithReset(&featureconfig.Flags{EnableEth1DataVoteCache: true})
defer resetCfg()
return m.Run()
}
os.Exit(run())
resetCfg := featureconfig.InitWithReset(&featureconfig.Flags{EnableEth1DataVoteCache: true})
defer resetCfg()
m.Run()
}

View File

@@ -244,7 +244,7 @@ func (dc *DepositCache) PruneProofs(ctx context.Context, untilDepositIndex int64
dc.depositsLock.Lock()
defer dc.depositsLock.Unlock()
if untilDepositIndex > int64(len(dc.deposits)) {
if untilDepositIndex >= int64(len(dc.deposits)) {
untilDepositIndex = int64(len(dc.deposits) - 1)
}
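A minimal, self-contained sketch of the boundary change above (illustrative values, not the cache's actual pruning loop): with four stored deposits, a request at index 4 passed the old `>` check unclamped, while `>=` clamps it to the last valid index.
```
package main

import "fmt"

// clamp mirrors the fixed bounds check from PruneProofs.
func clamp(untilDepositIndex int64, numDeposits int) int64 {
	if untilDepositIndex >= int64(numDeposits) { // previously '>', which let an index equal to the length through
		untilDepositIndex = int64(numDeposits - 1)
	}
	return untilDepositIndex
}

func main() {
	fmt.Println(clamp(4, 4)) // 3, the index of the last stored deposit
}
```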

View File

@@ -700,6 +700,49 @@ func TestPruneProofs_PruneAllWhenDepositIndexTooBig(t *testing.T) {
assert.DeepEqual(t, ([][]byte)(nil), dc.deposits[3].Deposit.Proof)
}
func TestPruneProofs_CorrectlyHandleLastIndex(t *testing.T) {
dc, err := New()
require.NoError(t, err)
deposits := []struct {
blkNum uint64
deposit *ethpb.Deposit
index int64
}{
{
blkNum: 0,
deposit: &ethpb.Deposit{Proof: makeDepositProof()},
index: 0,
},
{
blkNum: 0,
deposit: &ethpb.Deposit{Proof: makeDepositProof()},
index: 1,
},
{
blkNum: 0,
deposit: &ethpb.Deposit{Proof: makeDepositProof()},
index: 2,
},
{
blkNum: 0,
deposit: &ethpb.Deposit{Proof: makeDepositProof()},
index: 3,
},
}
for _, ins := range deposits {
dc.InsertDeposit(context.Background(), ins.deposit, ins.blkNum, ins.index, [32]byte{})
}
require.NoError(t, dc.PruneProofs(context.Background(), 4))
assert.DeepEqual(t, ([][]byte)(nil), dc.deposits[0].Deposit.Proof)
assert.DeepEqual(t, ([][]byte)(nil), dc.deposits[1].Deposit.Proof)
assert.DeepEqual(t, ([][]byte)(nil), dc.deposits[2].Deposit.Proof)
assert.DeepEqual(t, ([][]byte)(nil), dc.deposits[3].Deposit.Proof)
}
func makeDepositProof() [][]byte {
proof := make([][]byte, int(params.BeaconConfig().DepositContractTreeDepth)+1)
for i := range proof {

View File

@@ -1,21 +1,17 @@
package spectest
import (
"os"
"testing"
"github.com/prysmaticlabs/prysm/shared/params"
)
func TestMain(m *testing.M) {
run := func() int {
prevConfig := params.BeaconConfig().Copy()
defer params.OverrideBeaconConfig(prevConfig)
c := params.BeaconConfig()
c.MinGenesisActiveValidatorCount = 16384
params.OverrideBeaconConfig(c)
prevConfig := params.BeaconConfig().Copy()
defer params.OverrideBeaconConfig(prevConfig)
c := params.BeaconConfig()
c.MinGenesisActiveValidatorCount = 16384
params.OverrideBeaconConfig(c)
return m.Run()
}
os.Exit(run())
m.Run()
}

View File

@@ -11,7 +11,6 @@ go_library(
name = "go_default_library",
srcs = [
"alias.go",
"http_backup_handler.go",
] + select({
":kafka_disabled": [
"db.go",

View File

@@ -11,6 +11,7 @@ go_library(
"//beacon-chain/state:go_default_library",
"//proto/beacon/db:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//shared/backuputil:go_default_library",
"@com_github_ethereum_go_ethereum//common:go_default_library",
"@com_github_prysmaticlabs_ethereumapis//eth/v1alpha1:go_default_library",
],

View File

@@ -13,6 +13,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/state"
"github.com/prysmaticlabs/prysm/proto/beacon/db"
ethereum_beacon_p2p_v1 "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/backuputil"
)
// ReadOnlyDatabase defines a struct which only has read access to database methods.
@@ -100,11 +101,9 @@ type HeadAccessDatabase interface {
// Database interface with full access.
type Database interface {
io.Closer
backuputil.BackupExporter
HeadAccessDatabase
DatabasePath() string
ClearDB() error
// Backup and restore methods
Backup(ctx context.Context, outputDir string) error
}

View File

@@ -51,11 +51,11 @@ func (s *Store) Backup(ctx context.Context, outputDir string) error {
&bolt.Options{Timeout: params.BeaconIoConfig().BoltTimeout},
)
if err != nil {
panic(err)
return err
}
defer func() {
if err := copyDB.Close(); err != nil {
logrus.WithError(err).Error("Failed to close destination database")
logrus.WithError(err).Error("Failed to close backup database")
}
}()

View File

@@ -3,7 +3,8 @@ package kv
import (
"context"
"io/ioutil"
"path"
"os"
"path/filepath"
"testing"
"github.com/prysmaticlabs/prysm/shared/testutil"
@@ -26,7 +27,22 @@ func TestStore_Backup(t *testing.T) {
require.NoError(t, db.Backup(ctx, ""))
files, err := ioutil.ReadDir(path.Join(db.databasePath, backupsDirectoryName))
backupsPath := filepath.Join(db.databasePath, backupsDirectoryName)
files, err := ioutil.ReadDir(backupsPath)
require.NoError(t, err)
require.NotEqual(t, 0, len(files), "No backups created")
require.NoError(t, db.Close(), "Failed to close database")
oldFilePath := filepath.Join(backupsPath, files[0].Name())
newFilePath := filepath.Join(backupsPath, databaseFileName)
// We rename the file to match the database file name
// our NewKVStore function expects when opening a database.
require.NoError(t, os.Rename(oldFilePath, newFilePath))
backedDB, err := NewKVStore(backupsPath, nil)
require.NoError(t, err, "Failed to instantiate DB")
t.Cleanup(func() {
require.NoError(t, backedDB.Close(), "Failed to close database")
})
require.Equal(t, true, backedDB.HasState(ctx, root))
}

View File

@@ -11,6 +11,7 @@ go_library(
visibility = ["//beacon-chain:__subpackages__"],
deps = [
"//shared/cmd:go_default_library",
"//shared/params:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@com_github_urfave_cli_v2//:go_default_library",
],

View File

@@ -3,6 +3,7 @@
package flags
import (
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/urfave/cli/v2"
)
@@ -17,7 +18,7 @@ var (
DepositContractFlag = &cli.StringFlag{
Name: "deposit-contract",
Usage: "Deposit contract address. Beacon chain node will listen logs coming from the deposit contract to determine when validator is eligible to participate.",
Value: "0x07b39F4fDE4A38bACe212b546dAc87C58DfE3fDC", // Medalla deposit contract address.
Value: params.BeaconNetworkConfig().DepositContractAddress,
}
// RPCHost defines the host on which the RPC server should listen.
RPCHost = &cli.StringFlag{
@@ -69,7 +70,7 @@ var (
Name: "grpc-gateway-corsdomain",
Usage: "Comma separated list of domains from which to accept cross origin requests " +
"(browser enforced). This flag has no effect if not used with --grpc-gateway-port.",
Value: "http://localhost:4242,http://127.0.0.1:4242,http://localhost:4200",
Value: "http://localhost:4200,http://localhost:7500,http://127.0.0.1:4200,http://127.0.0.1:7500,http://0.0.0.0:4200,http://0.0.0.0:7500",
}
// MinSyncPeers specifies the required number of successful peer handshakes in order
// to start syncing with external peers.
@@ -130,6 +131,10 @@ var (
Name: "enable-debug-rpc-endpoints",
Usage: "Enables the debug rpc service, containing utility endpoints such as /eth/v1alpha1/beacon/state.",
}
SubscribeToAllSubnets = &cli.BoolFlag{
Name: "subscribe-all-subnets",
Usage: "Subscribe to all possible attestation subnets.",
}
// HistoricalSlasherNode is a set of beacon node flags required for performing historical detection with a slasher.
HistoricalSlasherNode = &cli.BoolFlag{
Name: "historical-slasher-node",
@@ -152,14 +157,10 @@ var (
"If such a sync is not possible, the node will treat it a critical and irrecoverable failure",
Value: "",
}
// EnableBackupWebhookFlag for users to trigger db backups via an HTTP webhook.
EnableBackupWebhookFlag = &cli.BoolFlag{
Name: "enable-db-backup-webhook",
Usage: "Serve HTTP handler to initiate database backups. The handler is served on the monitoring port at path /db/backup.",
}
// BackupWebhookOutputDir to customize the output directory for db backups.
BackupWebhookOutputDir = &cli.StringFlag{
Name: "db-backup-output-dir",
Usage: "Output directory for db backups",
// Eth1HeaderReqLimit defines a flag to set the maximum number of headers that a deposit log query can fetch. If none is set, 1000 will be the limit.
Eth1HeaderReqLimit = &cli.Uint64Flag{
Name: "eth1-header-req-limit",
Usage: "Sets the maximum number of headers that a deposit log query can fetch.",
Value: uint64(1000),
}
)

View File

@@ -12,6 +12,7 @@ type GlobalFlags struct {
HeadSync bool
DisableSync bool
DisableDiscv5 bool
SubscribeToAllSubnets bool
MinimumSyncPeers int
BlockBatchLimit int
BlockBatchLimitBurstFactor int
@@ -44,6 +45,10 @@ func ConfigureGlobalFlags(ctx *cli.Context) {
log.Warn("Using Disable Sync flag, using this flag on a live network might lead to adverse consequences.")
cfg.DisableSync = true
}
if ctx.Bool(SubscribeToAllSubnets.Name) {
log.Warn("Subscribing to All Attestation Subnets")
cfg.SubscribeToAllSubnets = true
}
cfg.DisableDiscv5 = ctx.Bool(DisableDiscv5.Name)
cfg.BlockBatchLimit = ctx.Int(BlockBatchLimit.Name)
cfg.BlockBatchLimitBurstFactor = ctx.Int(BlockBatchLimitBurstFactor.Name)

View File

@@ -62,6 +62,7 @@ func (g *Gateway) Start() {
ethpb.RegisterNodeHandler,
ethpb.RegisterBeaconChainHandler,
ethpb.RegisterBeaconNodeValidatorHandler,
pbrpc.RegisterHealthHandler,
}
if g.enableDebugRPCEndpoints {
handlers = append(handlers, pbrpc.RegisterDebugHandler)

View File

@@ -97,7 +97,11 @@ func NewService(ctx context.Context, cfg *Config) *Service {
// Generated genesis time; fetch it
s.genesisTime = genesisTrie.GenesisTime()
}
go slotutil.CountdownToGenesis(ctx, time.Unix(int64(s.genesisTime), 0), s.numValidators)
gRoot, err := genesisTrie.HashTreeRoot(s.ctx)
if err != nil {
log.Fatalf("Could not hash tree root genesis state: %v", err)
}
go slotutil.CountdownToGenesis(ctx, time.Unix(int64(s.genesisTime), 0), s.numValidators, gRoot)
if err := s.saveGenesisState(ctx, genesisTrie); err != nil {
log.Fatalf("Could not save interop genesis state %v", err)

View File

@@ -50,12 +50,14 @@ var appFlags = []cli.Flag{
flags.InteropGenesisTimeFlag,
flags.SlotsPerArchivedPoint,
flags.EnableDebugRPCEndpoints,
flags.EnableBackupWebhookFlag,
flags.BackupWebhookOutputDir,
flags.SubscribeToAllSubnets,
flags.HistoricalSlasherNode,
flags.ChainID,
flags.NetworkID,
flags.WeakSubjectivityCheckpt,
flags.Eth1HeaderReqLimit,
cmd.EnableBackupWebhookFlag,
cmd.BackupWebhookOutputDir,
cmd.MinimalConfigFlag,
cmd.E2EConfigFlag,
cmd.RPCMaxPageSizeFlag,

View File

@@ -29,6 +29,7 @@ go_library(
"//beacon-chain/sync:go_default_library",
"//beacon-chain/sync/initial-sync:go_default_library",
"//shared:go_default_library",
"//shared/backuputil:go_default_library",
"//shared/cmd:go_default_library",
"//shared/debug:go_default_library",
"//shared/event:go_default_library",

View File

@@ -36,6 +36,7 @@ import (
regularsync "github.com/prysmaticlabs/prysm/beacon-chain/sync"
initialsync "github.com/prysmaticlabs/prysm/beacon-chain/sync/initial-sync"
"github.com/prysmaticlabs/prysm/shared"
"github.com/prysmaticlabs/prysm/shared/backuputil"
"github.com/prysmaticlabs/prysm/shared/cmd"
"github.com/prysmaticlabs/prysm/shared/debug"
"github.com/prysmaticlabs/prysm/shared/event"
@@ -486,12 +487,13 @@ func (b *BeaconNode) registerPOWChainService() error {
}
cfg := &powchain.Web3ServiceConfig{
HTTPEndPoint: b.cliCtx.String(flags.HTTPWeb3ProviderFlag.Name),
DepositContract: common.HexToAddress(depAddress),
BeaconDB: b.db,
DepositCache: b.depositCache,
StateNotifier: b,
StateGen: b.stateGen,
HTTPEndPoint: b.cliCtx.String(flags.HTTPWeb3ProviderFlag.Name),
DepositContract: common.HexToAddress(depAddress),
BeaconDB: b.db,
DepositCache: b.depositCache,
StateNotifier: b,
StateGen: b.stateGen,
Eth1HeaderReqLimit: b.cliCtx.Uint64(flags.Eth1HeaderReqLimit.Name),
}
web3Service, err := powchain.NewService(b.ctx, cfg)
if err != nil {
@@ -507,7 +509,10 @@ func (b *BeaconNode) registerPOWChainService() error {
}
}
if len(knownContract) > 0 && !bytes.Equal(cfg.DepositContract.Bytes(), knownContract) {
return fmt.Errorf("database contract is %#x but tried to run with %#x", knownContract, cfg.DepositContract.Bytes())
return fmt.Errorf("database contract is %#x but tried to run with %#x. This likely means "+
"you are trying to run on a different network than what the database contains. You can run once with "+
"'--clear-db' to wipe the old database or use an alternative data directory with '--datadir'",
knownContract, cfg.DepositContract.Bytes())
}
log.Infof("Deposit contract: %#x", cfg.DepositContract.Bytes())
@@ -598,6 +603,8 @@ func (b *BeaconNode) registerRPCService() error {
host := b.cliCtx.String(flags.RPCHost.Name)
port := b.cliCtx.String(flags.RPCPort.Name)
beaconMonitoringHost := b.cliCtx.String(cmd.MonitoringHostFlag.Name)
beaconMonitoringPort := b.cliCtx.Int(flags.MonitoringPortFlag.Name)
cert := b.cliCtx.String(flags.CertFlag.Name)
key := b.cliCtx.String(flags.KeyFlag.Name)
mockEth1DataVotes := b.cliCtx.Bool(flags.InteropMockEth1DataVotesFlag.Name)
@@ -607,6 +614,8 @@ func (b *BeaconNode) registerRPCService() error {
rpcService := rpc.NewService(b.ctx, &rpc.Config{
Host: host,
Port: port,
BeaconMonitoringHost: beaconMonitoringHost,
BeaconMonitoringPort: beaconMonitoringPort,
CertFlag: cert,
KeyFlag: key,
BeaconDB: b.db,
@@ -654,12 +663,12 @@ func (b *BeaconNode) registerPrometheusService(cliCtx *cli.Context) error {
panic(err)
}
if cliCtx.IsSet(flags.EnableBackupWebhookFlag.Name) {
if cliCtx.IsSet(cmd.EnableBackupWebhookFlag.Name) {
additionalHandlers = append(
additionalHandlers,
prometheus.Handler{
Path: "/db/backup",
Handler: db.BackupHandler(b.db, cliCtx.String(flags.BackupWebhookOutputDir.Name)),
Handler: backuputil.BackupHandler(b.db, cliCtx.String(cmd.BackupWebhookOutputDir.Name)),
},
)
}

View File

@@ -6,12 +6,6 @@ import (
)
var (
numPendingAttesterSlashingFailedSigVerify = promauto.NewCounter(
prometheus.CounterOpts{
Name: "pending_attester_slashing_fail_sig_verify_total",
Help: "Times an pending attester slashing fails sig verification",
},
)
numPendingAttesterSlashings = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "num_pending_attester_slashings",
@@ -24,18 +18,6 @@ var (
Help: "Number of attester slashings included in blocks",
},
)
attesterSlashingReattempts = promauto.NewCounter(
prometheus.CounterOpts{
Name: "attester_slashing_reattempts_total",
Help: "Times an attester slashing for an already slashed validator is received",
},
)
numPendingProposerSlashingFailedSigVerify = promauto.NewCounter(
prometheus.CounterOpts{
Name: "pending_proposer_slashing_fail_sig_verify_total",
Help: "Times an pending proposer slashing fails sig verification",
},
)
numPendingProposerSlashings = promauto.NewGauge(
prometheus.GaugeOpts{
Name: "num_pending_proposer_slashings",
@@ -48,10 +30,4 @@ var (
Help: "Number of proposer slashings included in blocks",
},
)
proposerSlashingReattempts = promauto.NewCounter(
prometheus.CounterOpts{
Name: "proposer_slashing_reattempts_total",
Help: "Times a proposer slashing for an already slashed validator is received",
},
)
)

View File

@@ -124,7 +124,6 @@ func (p *Pool) InsertAttesterSlashing(
defer span.End()
if err := blocks.VerifyAttesterSlashing(ctx, state, slashing); err != nil {
numPendingAttesterSlashingFailedSigVerify.Inc()
return errors.Wrap(err, "could not verify attester slashing")
}
@@ -139,7 +138,6 @@ func (p *Pool) InsertAttesterSlashing(
// If the validator has already exited, has already been slashed, or if its index
// has been recently included in the pool of slashings, skip including this index.
if !ok {
attesterSlashingReattempts.Inc()
cantSlash = append(cantSlash, val)
continue
}
@@ -150,7 +148,6 @@ func (p *Pool) InsertAttesterSlashing(
return p.pendingAttesterSlashing[i].validatorToSlash >= val
})
if found != len(p.pendingAttesterSlashing) && p.pendingAttesterSlashing[found].validatorToSlash == val {
attesterSlashingReattempts.Inc()
cantSlash = append(cantSlash, val)
continue
}
@@ -185,7 +182,6 @@ func (p *Pool) InsertProposerSlashing(
defer span.End()
if err := blocks.VerifyProposerSlashing(state, slashing); err != nil {
numPendingProposerSlashingFailedSigVerify.Inc()
return errors.Wrap(err, "could not verify proposer slashing")
}
@@ -198,7 +194,6 @@ func (p *Pool) InsertProposerSlashing(
// has been recently included in the pool of slashings, do not process this new
// slashing.
if !ok {
proposerSlashingReattempts.Inc()
return fmt.Errorf("validator at index %d cannot be slashed", idx)
}

View File

@@ -42,6 +42,7 @@ go_library(
"//beacon-chain/core/feed:go_default_library",
"//beacon-chain/core/feed/state:go_default_library",
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/flags:go_default_library",
"//beacon-chain/p2p/encoder:go_default_library",
"//beacon-chain/p2p/peers:go_default_library",
"//beacon-chain/p2p/peers/peerdata:go_default_library",
@@ -60,7 +61,6 @@ go_library(
"//shared/timeutils:go_default_library",
"//shared/traceutil:go_default_library",
"//shared/version:go_default_library",
"@com_github_dgraph_io_ristretto//:go_default_library",
"@com_github_ethereum_go_ethereum//crypto:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/discover:go_default_library",
"@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
@@ -81,9 +81,8 @@ go_library(
"@com_github_libp2p_go_libp2p_noise//:go_default_library",
"@com_github_libp2p_go_libp2p_pubsub//:go_default_library",
"@com_github_libp2p_go_libp2p_pubsub//pb:go_default_library",
"@com_github_libp2p_go_libp2p_secio//:go_default_library",
"@com_github_multiformats_go_multiaddr//:go_default_library",
"@com_github_multiformats_go_multiaddr_net//:go_default_library",
"@com_github_multiformats_go_multiaddr//net:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prometheus_client_golang//prometheus:go_default_library",
"@com_github_prometheus_client_golang//prometheus/promauto:go_default_library",
@@ -124,6 +123,7 @@ go_test(
"//beacon-chain/core/helpers:go_default_library",
"//beacon-chain/p2p/encoder:go_default_library",
"//beacon-chain/p2p/peers:go_default_library",
"//beacon-chain/p2p/peers/peerdata:go_default_library",
"//beacon-chain/p2p/peers/scorers:go_default_library",
"//beacon-chain/p2p/testing:go_default_library",
"//beacon-chain/p2p/types:go_default_library",
@@ -152,6 +152,7 @@ go_test(
"@com_github_libp2p_go_libp2p_core//host:go_default_library",
"@com_github_libp2p_go_libp2p_core//network:go_default_library",
"@com_github_libp2p_go_libp2p_core//peer:go_default_library",
"@com_github_libp2p_go_libp2p_noise//:go_default_library",
"@com_github_libp2p_go_libp2p_pubsub//:go_default_library",
"@com_github_libp2p_go_libp2p_pubsub//pb:go_default_library",
"@com_github_libp2p_go_libp2p_swarm//testing:go_default_library",

View File

@@ -9,15 +9,21 @@ import (
"github.com/libp2p/go-libp2p-core/peer"
"github.com/multiformats/go-multiaddr"
filter "github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr-net"
manet "github.com/multiformats/go-multiaddr/net"
"github.com/sirupsen/logrus"
)
// limit for rate limiter when processing new inbound dials.
const ipLimit = 4
const (
// Limit for rate limiter when processing new inbound dials.
ipLimit = 4
// burst limit for inbound dials.
const ipBurst = 8
// Burst limit for inbound dials.
ipBurst = 8
// High watermark buffer is the extra allowance beyond the peer
// limit up to which we will still handle inbound requests.
highWatermarkBuffer = 10
)
// InterceptPeerDial tests whether we're permitted to Dial the specified peer.
func (s *Service) InterceptPeerDial(_ peer.ID) (allow bool) {
@@ -44,7 +50,7 @@ func (s *Service) InterceptAccept(n network.ConnMultiaddrs) (allow bool) {
"reason": "exceeded dial limit"}).Trace("Not accepting inbound dial from ip address")
return false
}
if s.isPeerAtLimit() {
if s.isPeerAtLimit(true /* inbound */) {
log.WithFields(logrus.Fields{"peer": n.RemoteMultiaddr(),
"reason": "at peer limit"}).Trace("Not accepting inbound dial")
return false

View File

@@ -44,6 +44,10 @@ func TestPeer_AtMaxLimit(t *testing.T) {
require.NoError(t, err)
}()
for i := 0; i < highWatermarkBuffer; i++ {
addPeer(t, s.peers, peers.PeerConnected)
}
// create alternate host
listen, err = multiaddr.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d", ipAddr2, 3000))
require.NoError(t, err, "Failed to p2p listen")

View File

@@ -68,7 +68,7 @@ func (s *Service) listenForNewNodes() {
if s.ctx.Err() != nil {
break
}
if s.isPeerAtLimit() {
if s.isPeerAtLimit(false /* inbound */) {
// Pause the main loop for a period to stop looking
// for new peers.
log.Trace("Not looking for peers, at peer limit")
@@ -276,9 +276,14 @@ func (s *Service) filterPeer(node *enode.Node) bool {
// This checks our set max peers in our config, and
// determines whether our currently connected and
// active peers are above our set max peer limit.
func (s *Service) isPeerAtLimit() bool {
func (s *Service) isPeerAtLimit(inbound bool) bool {
numOfConns := len(s.host.Network().Peers())
maxPeers := int(s.cfg.MaxPeers)
// If we are measuring the limit for inbound peers
// we apply the high watermark buffer.
if inbound {
maxPeers += highWatermarkBuffer
}
activePeers := len(s.Peers().Active())
return activePeers >= maxPeers || numOfConns >= maxPeers
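A minimal sketch of the resulting behavior, mirroring the inbound-limit test later in this diff: at a limit of 30 with 30 active peers, outbound checks report the limit reached while inbound checks still have the watermark headroom.
```
package main

import "fmt"

const highWatermarkBuffer = 10

// atLimit condenses the updated check: inbound dials get extra headroom.
func atLimit(activePeers, maxPeers int, inbound bool) bool {
	if inbound {
		maxPeers += highWatermarkBuffer
	}
	return activePeers >= maxPeers
}

func main() {
	fmt.Println(atLimit(30, 30, false)) // true: outbound at limit
	fmt.Println(atLimit(30, 30, true))  // false: inbound headroom remains
	fmt.Println(atLimit(40, 30, true))  // true: headroom exhausted
}
```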

View File

@@ -14,10 +14,21 @@ import (
"github.com/ethereum/go-ethereum/p2p/discover"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/kevinms/leakybucket-go"
"github.com/libp2p/go-libp2p-core/host"
"github.com/libp2p/go-libp2p-core/network"
"github.com/libp2p/go-libp2p-core/peer"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/go-bitfield"
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers/peerdata"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers/scorers"
testp2p "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/iputils"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
@@ -226,3 +237,48 @@ func TestHostIsResolved(t *testing.T) {
newIP := list.Self().IP()
assert.Equal(t, exampleIP, newIP.String(), "Did not resolve to expected IP")
}
func TestInboundPeerLimit(t *testing.T) {
fakePeer := testp2p.NewTestP2P(t)
s := &Service{
cfg: &Config{MaxPeers: 30},
ipLimiter: leakybucket.NewCollector(ipLimit, ipBurst, false),
peers: peers.NewStatus(context.Background(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{},
}),
host: fakePeer.BHost,
}
for i := 0; i < 30; i++ {
_ = addPeer(t, s.peers, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
}
require.Equal(t, true, s.isPeerAtLimit(false), "not at limit for outbound peers")
require.Equal(t, false, s.isPeerAtLimit(true), "at limit for inbound peers")
for i := 0; i < highWatermarkBuffer; i++ {
_ = addPeer(t, s.peers, peerdata.PeerConnectionState(ethpb.ConnectionState_CONNECTED))
}
require.Equal(t, true, s.isPeerAtLimit(true), "not at limit for inbound peers")
}
// addPeer is a helper to add a peer with a given connection state.
func addPeer(t *testing.T, p *peers.Status, state peerdata.PeerConnectionState) peer.ID {
// Set up some peers with different states
mhBytes := []byte{0x11, 0x04}
idBytes := make([]byte, 4)
_, err := rand.Read(idBytes)
require.NoError(t, err)
mhBytes = append(mhBytes, idBytes...)
id, err := peer.IDFromBytes(mhBytes)
require.NoError(t, err)
p.Add(new(enr.Record), id, nil, network.DirUnknown)
p.SetConnectionState(id, state)
p.SetMetadata(id, &pb.MetaData{
SeqNumber: 0,
Attnets: bitfield.NewBitvector64(),
})
return id
}

View File

@@ -8,7 +8,6 @@ import (
"github.com/libp2p/go-libp2p"
"github.com/libp2p/go-libp2p-core/peer"
noise "github.com/libp2p/go-libp2p-noise"
secio "github.com/libp2p/go-libp2p-secio"
ma "github.com/multiformats/go-multiaddr"
"github.com/pkg/errors"
"github.com/prysmaticlabs/prysm/shared/version"
@@ -37,7 +36,7 @@ func (s *Service) buildOptions(ip net.IP, priKey *ecdsa.PrivateKey) []libp2p.Opt
libp2p.ConnectionGater(s),
}
options = append(options, libp2p.Security(noise.ID, noise.New), libp2p.Security(secio.ID, secio.New))
options = append(options, libp2p.Security(noise.ID, noise.New))
if cfg.EnableUPnP {
options = append(options, libp2p.NATPortMap()) //Allow to use UPnP

View File

@@ -10,8 +10,8 @@ import (
const (
// overlay parameters
gossipSubD = 6 // topic stable mesh target count
gossipSubDlo = 5 // topic stable mesh low watermark
gossipSubD = 8 // topic stable mesh target count
gossipSubDlo = 6 // topic stable mesh low watermark
gossipSubDhi = 12 // topic stable mesh high watermark
// gossip parameters

View File

@@ -18,6 +18,7 @@ go_library(
"@com_github_libp2p_go_libp2p_core//network:go_default_library",
"@com_github_libp2p_go_libp2p_core//peer:go_default_library",
"@com_github_multiformats_go_multiaddr//:go_default_library",
"@com_github_multiformats_go_multiaddr//net:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
],
)

View File

@@ -48,10 +48,11 @@ type PeerData struct {
Enr *enr.Record
NextValidTime time.Time
// Chain related data.
ChainState *pb.Status
MetaData *pb.MetaData
ChainStateLastUpdated time.Time
// Scorers related data.
MetaData *pb.MetaData
ChainState *pb.Status
ChainStateLastUpdated time.Time
ChainStateValidationError error
// Scorers internal data.
BadResponses int
ProcessedBlocks uint64
BlockProviderUpdated time.Time

View File

@@ -2,7 +2,6 @@ package peers_test
import (
"io/ioutil"
"os"
"testing"
"github.com/prysmaticlabs/prysm/beacon-chain/flags"
@@ -11,24 +10,21 @@ import (
)
func TestMain(m *testing.M) {
run := func() int {
logrus.SetLevel(logrus.DebugLevel)
logrus.SetOutput(ioutil.Discard)
logrus.SetLevel(logrus.DebugLevel)
logrus.SetOutput(ioutil.Discard)
resetCfg := featureconfig.InitWithReset(&featureconfig.Flags{
EnablePeerScorer: true,
})
defer resetCfg()
resetCfg := featureconfig.InitWithReset(&featureconfig.Flags{
EnablePeerScorer: true,
})
defer resetCfg()
resetFlags := flags.Get()
flags.Init(&flags.GlobalFlags{
BlockBatchLimit: 64,
BlockBatchLimitBurstFactor: 10,
})
defer func() {
flags.Init(resetFlags)
}()
return m.Run()
}
os.Exit(run())
resetFlags := flags.Get()
flags.Init(&flags.GlobalFlags{
BlockBatchLimit: 64,
BlockBatchLimitBurstFactor: 10,
})
defer func() {
flags.Init(resetFlags)
}()
m.Run()
}

View File

@@ -6,6 +6,7 @@ go_library(
srcs = [
"bad_responses.go",
"block_providers.go",
"peer_status.go",
"service.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers/scorers",
@@ -13,6 +14,8 @@ go_library(
deps = [
"//beacon-chain/flags:go_default_library",
"//beacon-chain/p2p/peers/peerdata:go_default_library",
"//beacon-chain/p2p/types:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//shared/featureconfig:go_default_library",
"//shared/rand:go_default_library",
"//shared/timeutils:go_default_library",
@@ -25,6 +28,7 @@ go_test(
srcs = [
"bad_responses_test.go",
"block_providers_test.go",
"peer_status_test.go",
"scorers_test.go",
"service_test.go",
],
@@ -33,6 +37,8 @@ go_test(
"//beacon-chain/flags:go_default_library",
"//beacon-chain/p2p/peers:go_default_library",
"//beacon-chain/p2p/peers/peerdata:go_default_library",
"//beacon-chain/p2p/types:go_default_library",
"//proto/beacon/p2p/v1:go_default_library",
"//shared/featureconfig:go_default_library",
"//shared/rand:go_default_library",
"//shared/testutil/assert:go_default_library",

View File

@@ -1,18 +1,17 @@
package scorers
import (
"context"
"time"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers/peerdata"
)
var _ Scorer = (*BadResponsesScorer)(nil)
const (
// DefaultBadResponsesThreshold defines how many bad responses to tolerate before a peer is deemed bad.
DefaultBadResponsesThreshold = 6
// DefaultBadResponsesWeight is a default weight. Since score represents penalty, it has negative weight.
DefaultBadResponsesWeight = -1.0
// DefaultBadResponsesDecayInterval defines how often to decay previous statistics.
// Every interval bad responses counter will be decremented by 1.
DefaultBadResponsesDecayInterval = time.Hour
@@ -20,7 +19,6 @@ const (
// BadResponsesScorer represents bad responses scoring service.
type BadResponsesScorer struct {
ctx context.Context
config *BadResponsesScorerConfig
store *peerdata.Store
}
@@ -29,29 +27,22 @@ type BadResponsesScorer struct {
type BadResponsesScorerConfig struct {
// Threshold specifies the number of bad responses tolerated before a peer is banned.
Threshold int
// Weight defines weight of bad response/threshold ratio on overall score.
Weight float64
// DecayInterval specifies how often bad response stats should be decayed.
DecayInterval time.Duration
}
// newBadResponsesScorer creates new bad responses scoring service.
func newBadResponsesScorer(
ctx context.Context, store *peerdata.Store, config *BadResponsesScorerConfig) *BadResponsesScorer {
func newBadResponsesScorer(store *peerdata.Store, config *BadResponsesScorerConfig) *BadResponsesScorer {
if config == nil {
config = &BadResponsesScorerConfig{}
}
scorer := &BadResponsesScorer{
ctx: ctx,
config: config,
store: store,
}
if scorer.config.Threshold == 0 {
scorer.config.Threshold = DefaultBadResponsesThreshold
}
if scorer.config.Weight == 0.0 {
scorer.config.Weight = DefaultBadResponsesWeight
}
if scorer.config.DecayInterval == 0 {
scorer.config.DecayInterval = DefaultBadResponsesDecayInterval
}
@@ -65,8 +56,11 @@ func (s *BadResponsesScorer) Score(pid peer.ID) float64 {
return s.score(pid)
}
// score is a lock-free version of ScoreBadResponses.
// score is a lock-free version of Score.
func (s *BadResponsesScorer) score(pid peer.ID) float64 {
if s.isBadPeer(pid) {
return BadPeerScore
}
score := float64(0)
peerData, ok := s.store.PeerData(pid)
if !ok {
@@ -74,7 +68,8 @@ func (s *BadResponsesScorer) score(pid peer.ID) float64 {
}
if peerData.BadResponses > 0 {
score = float64(peerData.BadResponses) / float64(s.config.Threshold)
score = score * s.config.Weight
// Since score represents a penalty, negate it.
score *= -1
}
return score
}
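A worked example of the penalty above, using the default threshold of 6 defined in this file:
```
// A peer with 3 bad responses against a threshold of 6:
score := float64(3) / float64(6) // 0.5
score *= -1                      // -0.5, returned as the penalty
```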
@@ -131,7 +126,7 @@ func (s *BadResponsesScorer) isBadPeer(pid peer.ID) bool {
return false
}
// BadPeers returns the peers that are bad.
// BadPeers returns the peers that are considered bad.
func (s *BadResponsesScorer) BadPeers() []peer.ID {
s.store.RLock()
defer s.store.RUnlock()

View File

@@ -86,7 +86,6 @@ func TestScorers_BadResponses_Decay(t *testing.T) {
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: maxBadResponses,
Weight: 1,
},
},
})

View File

@@ -1,7 +1,6 @@
package scorers
import (
"context"
"fmt"
"math"
"sort"
@@ -15,6 +14,8 @@ import (
"github.com/prysmaticlabs/prysm/shared/timeutils"
)
var _ Scorer = (*BlockProviderScorer)(nil)
const (
// DefaultBlockProviderProcessedBatchWeight is a default reward weight of a processed batch of blocks.
DefaultBlockProviderProcessedBatchWeight = float64(0.1)
@@ -35,7 +36,6 @@ const (
// BlockProviderScorer represents block provider scoring service.
type BlockProviderScorer struct {
ctx context.Context
config *BlockProviderScorerConfig
store *peerdata.Store
// maxScore is a cached value for maximum attainable block provider score.
@@ -62,13 +62,11 @@ type BlockProviderScorerConfig struct {
}
// newBlockProviderScorer creates block provider scoring service.
func newBlockProviderScorer(
ctx context.Context, store *peerdata.Store, config *BlockProviderScorerConfig) *BlockProviderScorer {
func newBlockProviderScorer(store *peerdata.Store, config *BlockProviderScorerConfig) *BlockProviderScorer {
if config == nil {
config = &BlockProviderScorerConfig{}
}
scorer := &BlockProviderScorer{
ctx: ctx,
config: config,
store: store,
}
@@ -176,6 +174,20 @@ func (s *BlockProviderScorer) processedBlocks(pid peer.ID) uint64 {
return 0
}
// IsBadPeer states if the peer is to be considered bad.
// Block provider scorer cannot guarantee that a lower score for a peer is indeed a sign of a bad peer.
// Therefore, this scorer never marks peers as bad, and relies on scores to probabilistically sort
// out low-scorers (see WeightSorted method).
func (s *BlockProviderScorer) IsBadPeer(_ peer.ID) bool {
return false
}
// BadPeers returns the peers that are considered bad.
// No peers are considered bad by block providers scorer.
func (s *BlockProviderScorer) BadPeers() []peer.ID {
return []peer.ID{}
}
// Decay updates block provider counters by decaying them.
// This urges peers to keep up the performance to continue getting a high score (and allows
// new peers to contest previously high scoring ones).

View File

@@ -11,6 +11,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers/scorers"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/rand"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/timeutils"
@@ -438,17 +439,20 @@ func TestScorers_BlockProvider_FormatScorePretty(t *testing.T) {
},
}
peerStatusGen := func() *peers.Status {
return peers.NewStatus(ctx, &peers.StatusConfig{
ScorerParams: &scorers.Config{
BlockProviderScorerConfig: &scorers.BlockProviderScorerConfig{
ProcessedBatchWeight: 0.05,
ProcessedBlocksCap: 20 * batchSize,
Decay: 10 * batchSize,
},
},
})
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
ScorerParams: &scorers.Config{
BlockProviderScorerConfig: &scorers.BlockProviderScorerConfig{
ProcessedBatchWeight: 0.05,
ProcessedBlocksCap: 20 * batchSize,
Decay: 10 * batchSize,
},
},
})
peerStatuses := peerStatusGen()
scorer := peerStatuses.Scorers().BlockProviderScorer()
if tt.update != nil {
tt.update(scorer)
@@ -456,4 +460,29 @@ func TestScorers_BlockProvider_FormatScorePretty(t *testing.T) {
tt.check(scorer)
})
}
t.Run("peer scorer disabled", func(t *testing.T) {
resetCfg := featureconfig.InitWithReset(&featureconfig.Flags{
EnablePeerScorer: false,
})
defer resetCfg()
peerStatuses := peerStatusGen()
scorer := peerStatuses.Scorers().BlockProviderScorer()
assert.Equal(t, "disabled", scorer.FormatScorePretty("peer1"))
})
}
func TestScorers_BlockProvider_BadPeerMarking(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
ScorerParams: &scorers.Config{},
})
scorer := peerStatuses.Scorers().BlockProviderScorer()
assert.Equal(t, false, scorer.IsBadPeer("peer1"), "Unexpected status for unregistered peer")
scorer.IncrementProcessedBlocks("peer1", 64)
assert.Equal(t, false, scorer.IsBadPeer("peer1"))
assert.Equal(t, 0, len(scorer.BadPeers()))
}

View File

@@ -0,0 +1,143 @@
package scorers
import (
"errors"
"math"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers/peerdata"
p2ptypes "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/timeutils"
)
var _ Scorer = (*PeerStatusScorer)(nil)
// PeerStatusScorer represents a scorer that evaluates peers based on their statuses.
// Peer statuses are updated by regularly polling peers (see sync/rpc_status.go).
type PeerStatusScorer struct {
config *PeerStatusScorerConfig
store *peerdata.Store
ourHeadSlot uint64
highestPeerHeadSlot uint64
}
// PeerStatusScorerConfig holds configuration parameters for peer status scoring service.
type PeerStatusScorerConfig struct{}
// newPeerStatusScorer creates new peer status scoring service.
func newPeerStatusScorer(store *peerdata.Store, config *PeerStatusScorerConfig) *PeerStatusScorer {
if config == nil {
config = &PeerStatusScorerConfig{}
}
return &PeerStatusScorer{
config: config,
store: store,
}
}
// Score returns calculated peer score.
func (s *PeerStatusScorer) Score(pid peer.ID) float64 {
s.store.RLock()
defer s.store.RUnlock()
return s.score(pid)
}
// score is a lock-free version of Score.
func (s *PeerStatusScorer) score(pid peer.ID) float64 {
if s.isBadPeer(pid) {
return BadPeerScore
}
score := float64(0)
peerData, ok := s.store.PeerData(pid)
if !ok || peerData.ChainState == nil {
return score
}
if peerData.ChainState.HeadSlot < s.ourHeadSlot {
return score
}
// Calculate score as a ratio to the known maximum head slot.
// The closer the current peer's head slot is to the maximum, the higher the calculated score.
if s.highestPeerHeadSlot > 0 {
score = float64(peerData.ChainState.HeadSlot) / float64(s.highestPeerHeadSlot)
return math.Round(score*ScoreRoundingFactor) / ScoreRoundingFactor
}
return score
}
// IsBadPeer states if the peer is to be considered bad.
func (s *PeerStatusScorer) IsBadPeer(pid peer.ID) bool {
s.store.RLock()
defer s.store.RUnlock()
return s.isBadPeer(pid)
}
// isBadPeer is lock-free version of IsBadPeer.
func (s *PeerStatusScorer) isBadPeer(pid peer.ID) bool {
peerData, ok := s.store.PeerData(pid)
if !ok {
return false
}
// Mark the peer as bad if the latest error is one of the terminal ones.
terminalErrs := []error{
p2ptypes.ErrWrongForkDigestVersion,
}
for _, err := range terminalErrs {
if errors.Is(peerData.ChainStateValidationError, err) {
return true
}
}
return false
}
// BadPeers returns the peers that are considered bad.
func (s *PeerStatusScorer) BadPeers() []peer.ID {
s.store.RLock()
defer s.store.RUnlock()
badPeers := make([]peer.ID, 0)
for pid := range s.store.Peers() {
if s.isBadPeer(pid) {
badPeers = append(badPeers, pid)
}
}
return badPeers
}
// SetPeerStatus sets chain state data for a given peer.
func (s *PeerStatusScorer) SetPeerStatus(pid peer.ID, chainState *pb.Status, validationError error) {
s.store.Lock()
defer s.store.Unlock()
peerData := s.store.PeerDataGetOrCreate(pid)
peerData.ChainState = chainState
peerData.ChainStateLastUpdated = timeutils.Now()
peerData.ChainStateValidationError = validationError
// Update maximum known head slot (scores will be calculated with respect to that maximum value).
if chainState != nil && chainState.HeadSlot > s.highestPeerHeadSlot {
s.highestPeerHeadSlot = chainState.HeadSlot
}
}
// PeerStatus gets the chain state of the given remote peer.
// This can return nil if there is no known chain state for the peer.
// This will error if the peer does not exist.
func (s *PeerStatusScorer) PeerStatus(pid peer.ID) (*pb.Status, error) {
s.store.RLock()
defer s.store.RUnlock()
return s.peerStatus(pid)
}
// peerStatus is a lock-free version of PeerStatus.
func (s *PeerStatusScorer) peerStatus(pid peer.ID) (*pb.Status, error) {
if peerData, ok := s.store.PeerData(pid); ok {
return peerData.ChainState, nil
}
return nil, peerdata.ErrPeerUnknown
}
// SetHeadSlot updates known head slot.
func (s *PeerStatusScorer) SetHeadSlot(slot uint64) {
s.ourHeadSlot = slot
}

View File

@@ -0,0 +1,197 @@
package scorers_test
import (
"context"
"testing"
"github.com/libp2p/go-libp2p-core/peer"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers/peerdata"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers/scorers"
p2ptypes "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
"github.com/prysmaticlabs/prysm/shared/testutil/require"
)
func TestScorers_PeerStatus_Score(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
tests := []struct {
name string
update func(scorer *scorers.PeerStatusScorer)
check func(scorer *scorers.PeerStatusScorer)
}{
{
name: "nonexistent peer",
update: func(scorer *scorers.PeerStatusScorer) {
scorer.SetHeadSlot(64)
},
check: func(scorer *scorers.PeerStatusScorer) {
assert.Equal(t, 0.0, scorer.Score("peer1"), "Unexpected score")
},
},
{
name: "existent bad peer",
update: func(scorer *scorers.PeerStatusScorer) {
scorer.SetHeadSlot(0)
scorer.SetPeerStatus("peer1", &pb.Status{
HeadRoot: make([]byte, 32),
HeadSlot: 64,
}, p2ptypes.ErrWrongForkDigestVersion)
},
check: func(scorer *scorers.PeerStatusScorer) {
assert.Equal(t, scorers.BadPeerScore, scorer.Score("peer1"), "Unexpected score")
},
},
{
name: "existent peer no head slot for the host node is known",
update: func(scorer *scorers.PeerStatusScorer) {
scorer.SetHeadSlot(0)
scorer.SetPeerStatus("peer1", &pb.Status{
HeadRoot: make([]byte, 32),
HeadSlot: 64,
}, nil)
},
check: func(scorer *scorers.PeerStatusScorer) {
assert.Equal(t, 1.0, scorer.Score("peer1"), "Unexpected score")
},
},
{
name: "existent peer head is before ours",
update: func(scorer *scorers.PeerStatusScorer) {
scorer.SetHeadSlot(128)
scorer.SetPeerStatus("peer1", &pb.Status{
HeadRoot: make([]byte, 32),
HeadSlot: 64,
}, nil)
},
check: func(scorer *scorers.PeerStatusScorer) {
assert.Equal(t, 0.0, scorer.Score("peer1"), "Unexpected score")
},
},
{
name: "existent peer partial score",
update: func(scorer *scorers.PeerStatusScorer) {
headSlot := uint64(128)
scorer.SetHeadSlot(headSlot)
scorer.SetPeerStatus("peer1", &pb.Status{
HeadRoot: make([]byte, 32),
HeadSlot: headSlot + 64,
}, nil)
// Set another peer to a higher score.
scorer.SetPeerStatus("peer2", &pb.Status{
HeadRoot: make([]byte, 32),
HeadSlot: headSlot + 128,
}, nil)
},
check: func(scorer *scorers.PeerStatusScorer) {
headSlot := uint64(128)
assert.Equal(t, float64(headSlot+64)/float64(headSlot+128), scorer.Score("peer1"), "Unexpected score")
},
},
{
name: "existent peer full score",
update: func(scorer *scorers.PeerStatusScorer) {
headSlot := uint64(128)
scorer.SetHeadSlot(headSlot)
scorer.SetPeerStatus("peer1", &pb.Status{
HeadRoot: make([]byte, 32),
HeadSlot: headSlot + 64,
}, nil)
},
check: func(scorer *scorers.PeerStatusScorer) {
assert.Equal(t, 1.0, scorer.Score("peer1"), "Unexpected score")
},
},
{
name: "existent peer no max known slot",
update: func(scorer *scorers.PeerStatusScorer) {
scorer.SetHeadSlot(0)
scorer.SetPeerStatus("peer1", &pb.Status{
HeadRoot: make([]byte, 32),
HeadSlot: 0,
}, nil)
},
check: func(scorer *scorers.PeerStatusScorer) {
assert.Equal(t, 0.0, scorer.Score("peer1"), "Unexpected score")
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
peerStatuses := peers.NewStatus(ctx, &peers.StatusConfig{
ScorerParams: &scorers.Config{},
})
scorer := peerStatuses.Scorers().PeerStatusScorer()
if tt.update != nil {
tt.update(scorer)
}
tt.check(scorer)
})
}
}
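To make the "existent peer partial score" case above concrete: our head is at slot 128, peer1 reports head slot 192 and peer2 reports 256, so the highest known peer head is 256 and peer1 scores 192/256 = 0.75.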
func TestScorers_PeerStatus_IsBadPeer(t *testing.T) {
peerStatuses := peers.NewStatus(context.Background(), &peers.StatusConfig{
ScorerParams: &scorers.Config{},
})
pid := peer.ID("peer1")
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid))
assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid))
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid, &pb.Status{}, p2ptypes.ErrWrongForkDigestVersion)
assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer(pid))
assert.Equal(t, true, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid))
}
func TestScorers_PeerStatus_BadPeers(t *testing.T) {
peerStatuses := peers.NewStatus(context.Background(), &peers.StatusConfig{
ScorerParams: &scorers.Config{},
})
pid1 := peer.ID("peer1")
pid2 := peer.ID("peer2")
pid3 := peer.ID("peer3")
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid1))
assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid1))
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid2))
assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid2))
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid3))
assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid3))
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid1, &pb.Status{}, p2ptypes.ErrWrongForkDigestVersion)
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid2, &pb.Status{}, nil)
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus(pid3, &pb.Status{}, p2ptypes.ErrWrongForkDigestVersion)
assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer(pid1))
assert.Equal(t, true, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid1))
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer(pid2))
assert.Equal(t, false, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid2))
assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer(pid3))
assert.Equal(t, true, peerStatuses.Scorers().PeerStatusScorer().IsBadPeer(pid3))
assert.Equal(t, 2, len(peerStatuses.Scorers().PeerStatusScorer().BadPeers()))
assert.Equal(t, 2, len(peerStatuses.Scorers().BadPeers()))
}
func TestScorers_PeerStatus_PeerStatus(t *testing.T) {
peerStatuses := peers.NewStatus(context.Background(), &peers.StatusConfig{
ScorerParams: &scorers.Config{},
})
status, err := peerStatuses.Scorers().PeerStatusScorer().PeerStatus("peer1")
require.ErrorContains(t, peerdata.ErrPeerUnknown.Error(), err)
assert.Equal(t, (*pb.Status)(nil), status)
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus("peer1", &pb.Status{
HeadSlot: 128,
}, nil)
peerStatuses.Scorers().PeerStatusScorer().SetPeerStatus("peer2", &pb.Status{
HeadSlot: 128,
}, p2ptypes.ErrInvalidEpoch)
status, err = peerStatuses.Scorers().PeerStatusScorer().PeerStatus("peer1")
require.NoError(t, err)
assert.Equal(t, uint64(128), status.HeadSlot)
assert.Equal(t, nil, peerStatuses.Scorers().ValidationError("peer1"))
assert.ErrorContains(t, p2ptypes.ErrInvalidEpoch.Error(), peerStatuses.Scorers().ValidationError("peer2"))
assert.Equal(t, nil, peerStatuses.Scorers().ValidationError("peer3"))
}

View File

@@ -3,7 +3,6 @@ package scorers_test
import (
"io/ioutil"
"math"
"os"
"testing"
"github.com/prysmaticlabs/prysm/beacon-chain/flags"
@@ -13,26 +12,23 @@ import (
)
func TestMain(m *testing.M) {
logrus.SetLevel(logrus.DebugLevel)
logrus.SetOutput(ioutil.Discard)
resetCfg := featureconfig.InitWithReset(&featureconfig.Flags{
EnablePeerScorer: true,
})
defer resetCfg()
resetFlags := flags.Get()
flags.Init(&flags.GlobalFlags{
BlockBatchLimit: 64,
BlockBatchLimitBurstFactor: 10,
})
defer func() {
flags.Init(resetFlags)
}()
m.Run()
}
// roundScore returns the score rounded in accordance with the score manager's rounding factor.

View File

@@ -9,35 +9,58 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers/peerdata"
)
var _ Scorer = (*Service)(nil)
// ScoreRoundingFactor defines how many digits to keep in the decimal part.
// This parameter is used in math.Round(score*ScoreRoundingFactor) / ScoreRoundingFactor.
const ScoreRoundingFactor = 10000
// BadPeerScore defines the score returned for a bad peer (all other metrics are ignored).
const BadPeerScore = -1.00
// Scorer defines the minimum set of methods every peer scorer must expose.
type Scorer interface {
Score(pid peer.ID) float64
IsBadPeer(pid peer.ID) bool
BadPeers() []peer.ID
}
// Service manages peer scorers that are used to calculate the overall peer score.
type Service struct {
ctx context.Context
store *peerdata.Store
scorers struct {
badResponsesScorer *BadResponsesScorer
blockProviderScorer *BlockProviderScorer
peerStatusScorer *PeerStatusScorer
}
weights map[Scorer]float64
totalWeight float64
}
// Config holds configuration parameters for the scoring service.
type Config struct {
BadResponsesScorerConfig *BadResponsesScorerConfig
BlockProviderScorerConfig *BlockProviderScorerConfig
PeerStatusScorerConfig *PeerStatusScorerConfig
}
// NewService provides a fully initialized peer scoring service.
func NewService(ctx context.Context, store *peerdata.Store, config *Config) *Service {
s := &Service{
ctx: ctx,
store: store,
weights: make(map[Scorer]float64),
}
// Register scorers.
s.scorers.badResponsesScorer = newBadResponsesScorer(store, config.BadResponsesScorerConfig)
s.setScorerWeight(s.scorers.badResponsesScorer, 1.0)
s.scorers.blockProviderScorer = newBlockProviderScorer(store, config.BlockProviderScorerConfig)
s.setScorerWeight(s.scorers.blockProviderScorer, 1.0)
s.scorers.peerStatusScorer = newPeerStatusScorer(store, config.PeerStatusScorerConfig)
s.setScorerWeight(s.scorers.peerStatusScorer, 0.0)
// Start background tasks.
go s.loop(ctx)
return s
}
@@ -52,6 +75,22 @@ func (s *Service) BlockProviderScorer() *BlockProviderScorer {
return s.scorers.blockProviderScorer
}
// PeerStatusScorer exposes peer chain status scoring service.
func (s *Service) PeerStatusScorer() *PeerStatusScorer {
return s.scorers.peerStatusScorer
}
// ActiveScorersCount returns the number of scorers that can affect the score (those with a non-zero weight).
func (s *Service) ActiveScorersCount() int {
cnt := 0
for _, w := range s.weights {
if w > 0 {
cnt++
}
}
return cnt
}
// Score returns calculated peer score across all tracked metrics.
func (s *Service) Score(pid peer.ID) float64 {
s.store.RLock()
@@ -61,11 +100,57 @@ func (s *Service) Score(pid peer.ID) float64 {
if _, ok := s.store.PeerData(pid); !ok {
return 0
}
score += s.scorers.badResponsesScorer.score(pid) * s.scorerWeight(s.scorers.badResponsesScorer)
score += s.scorers.blockProviderScorer.score(pid) * s.scorerWeight(s.scorers.blockProviderScorer)
score += s.scorers.peerStatusScorer.score(pid) * s.scorerWeight(s.scorers.peerStatusScorer)
return math.Round(score*ScoreRoundingFactor) / ScoreRoundingFactor
}
// IsBadPeer traverses all the scorers to see if any of them classifies peer as bad.
func (s *Service) IsBadPeer(pid peer.ID) bool {
s.store.RLock()
defer s.store.RUnlock()
return s.isBadPeer(pid)
}
// isBadPeer is a lock-free version of IsBadPeer.
func (s *Service) isBadPeer(pid peer.ID) bool {
if s.scorers.badResponsesScorer.isBadPeer(pid) {
return true
}
if s.scorers.peerStatusScorer.isBadPeer(pid) {
return true
}
return false
}
// BadPeers returns the peers that are considered bad by any of registered scorers.
func (s *Service) BadPeers() []peer.ID {
s.store.RLock()
defer s.store.RUnlock()
badPeers := make([]peer.ID, 0)
for pid := range s.store.Peers() {
if s.isBadPeer(pid) {
badPeers = append(badPeers, pid)
}
}
return badPeers
}
// ValidationError returns the peer data validation error, which potentially provides
// more information on why the peer is considered bad.
func (s *Service) ValidationError(pid peer.ID) error {
s.store.RLock()
defer s.store.RUnlock()
peerData, ok := s.store.PeerData(pid)
if !ok {
return nil
}
return peerData.ChainStateValidationError
}
// loop handles background tasks.
func (s *Service) loop(ctx context.Context) {
decayBadResponsesStats := time.NewTicker(s.scorers.badResponsesScorer.Params().DecayInterval)
@@ -84,3 +169,14 @@ func (s *Service) loop(ctx context.Context) {
}
}
}
// setScorerWeight registers a scorer with the given weight and updates the total weight.
func (s *Service) setScorerWeight(scorer Scorer, weight float64) {
s.weights[scorer] = weight
s.totalWeight += s.weights[scorer]
}
// scorerWeight returns the fraction of the total weight contributed by a given scorer.
func (s *Service) scorerWeight(scorer Scorer) float64 {
return s.weights[scorer] / s.totalWeight
}
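As a rough, self-contained sketch of the weight normalization above (the weights match those registered in NewService; the `combine` helper is illustrative, not part of the package):

```go
package main

import (
	"fmt"
	"math"
)

// combine mirrors Service.Score: each scorer's raw score is multiplied by its
// normalized weight (weight / totalWeight), summed, and rounded with the same
// factor the service uses.
func combine(scores, weights []float64) float64 {
	total := 0.0
	for _, w := range weights {
		total += w
	}
	sum := 0.0
	for i, s := range scores {
		sum += s * (weights[i] / total)
	}
	return math.Round(sum*10000) / 10000
}

func main() {
	// Bad responses 1.0, block provider 1.0, peer status 0.0 (total 2.0):
	// the peer status score is tracked but does not yet affect the result.
	fmt.Println(combine([]float64{-0.2, 0.8, 1.0}, []float64{1.0, 1.0, 0.0})) // 0.3
}
```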

View File

@@ -28,8 +28,8 @@ func TestScorers_Service_Init(t *testing.T) {
t.Run("bad responses scorer", func(t *testing.T) {
params := peerStatuses.Scorers().BadResponsesScorer().Params()
assert.Equal(t, scorers.DefaultBadResponsesThreshold, params.Threshold, "Unexpected threshold value")
assert.Equal(t, scorers.DefaultBadResponsesWeight, params.Weight, "Unexpected weight value")
assert.Equal(t, scorers.DefaultBadResponsesDecayInterval,
params.DecayInterval, "Unexpected decay interval value")
})
t.Run("block providers scorer", func(t *testing.T) {
@@ -48,7 +48,6 @@ func TestScorers_Service_Init(t *testing.T) {
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: 2,
Weight: -1,
DecayInterval: 1 * time.Minute,
},
BlockProviderScorerConfig: &scorers.BlockProviderScorerConfig{
@@ -64,7 +63,6 @@ func TestScorers_Service_Init(t *testing.T) {
t.Run("bad responses scorer", func(t *testing.T) {
params := peerStatuses.Scorers().BadResponsesScorer().Params()
assert.Equal(t, 2, params.Threshold, "Unexpected threshold value")
assert.Equal(t, -1.0, params.Weight, "Unexpected weight value")
assert.Equal(t, 1*time.Minute, params.DecayInterval, "Unexpected decay interval value")
})
@@ -119,7 +117,8 @@ func TestScorers_Service_Score(t *testing.T) {
for _, pid := range pids {
peerStatuses.Add(nil, pid, nil, network.DirUnknown)
// Not yet used peer gets boosted score.
startScore := s.BlockProviderScorer().MaxScore()
assert.Equal(t, startScore/float64(s.ActiveScorersCount()), s.Score(pid), "Unexpected score for not yet used peer")
}
return s, pids
}
@@ -136,27 +135,29 @@ func TestScorers_Service_Score(t *testing.T) {
t.Run("bad responses score", func(t *testing.T) {
s, pids := setupScorer()
// Peers start with boosted start score (new peers are boosted by block provider).
startScore := s.BlockProviderScorer().MaxScore() / float64(s.ActiveScorersCount())
penalty := (-1 / float64(s.BadResponsesScorer().Params().Threshold)) / float64(s.ActiveScorersCount())
// Update peers' stats and test the effect on peer order.
s.BadResponsesScorer().Increment("peer2")
assert.DeepEqual(t, pack(s, startScore, startScore+penalty, startScore), peerScores(s, pids))
s.BadResponsesScorer().Increment("peer1")
s.BadResponsesScorer().Increment("peer1")
assert.DeepEqual(t, pack(s, startScore+2*penalty, startScore+penalty, startScore), peerScores(s, pids))
// See how decaying affects order of peers.
s.BadResponsesScorer().Decay()
assert.DeepEqual(t, pack(s, startScore+penalty, startScore, startScore), peerScores(s, pids))
s.BadResponsesScorer().Decay()
assert.DeepEqual(t, pack(s, startScore, startScore, startScore), peerScores(s, pids))
})
t.Run("block providers score", func(t *testing.T) {
s, pids := setupScorer()
s1 := s.BlockProviderScorer()
startScore := s.BlockProviderScorer().MaxScore() / 2
batchWeight := s1.Params().ProcessedBatchWeight / 2
// Partial batch.
s1.IncrementProcessedBlocks("peer1", batchSize/4)
@@ -164,11 +165,11 @@ func TestScorers_Service_Score(t *testing.T) {
// Single batch.
s1.IncrementProcessedBlocks("peer1", batchSize)
assert.DeepEqual(t, pack(s, batchWeight, startScore, startScore), peerScores(s, pids), "Unexpected scores")
// Multiple batches.
s1.IncrementProcessedBlocks("peer2", batchSize*4)
assert.DeepEqual(t, pack(s, batchWeight, batchWeight*4, startScore), peerScores(s, pids), "Unexpected scores")
// Partial batch.
s1.IncrementProcessedBlocks("peer3", batchSize/2)
@@ -187,25 +188,22 @@ func TestScorers_Service_Score(t *testing.T) {
})
t.Run("overall score", func(t *testing.T) {
s, _ := setupScorer()
s1 := s.BlockProviderScorer()
s2 := s.BadResponsesScorer()
batchWeight := s1.Params().ProcessedBatchWeight / float64(s.ActiveScorersCount())
penalty := (-1 / float64(s.BadResponsesScorer().Params().Threshold)) / float64(s.ActiveScorersCount())
// Full score, no penalty.
s1.IncrementProcessedBlocks("peer1", batchSize*5)
assert.Equal(t, roundScore(batchWeight*5), s1.Score("peer1"))
assert.Equal(t, roundScore(batchWeight*5), s.Score("peer1"))
// Now, adjust score by introducing penalty for bad responses.
s2.Increment("peer1")
s2.Increment("peer1")
assert.Equal(t, -0.4, s2.Score("peer1"), "Unexpected bad responses score")
assert.Equal(t, roundScore(batchWeight*5), s1.Score("peer1"), "Unexpected block provider score")
assert.Equal(t, roundScore(batchWeight*5+2*penalty), s.Score("peer1"), "Unexpected overall score")
// If peer continues to misbehave, score becomes negative.
s2.Increment("peer1")
assert.Equal(t, -0.6, s2.Score("peer1"), "Unexpected bad responses score")
assert.Equal(t, roundScore(batchWeight*5), s1.Score("peer1"), "Unexpected block provider score")
assert.Equal(t, roundScore(batchWeight*5+3*penalty), s.Score("peer1"), "Unexpected overall score")
})
}
@@ -218,7 +216,6 @@ func TestScorers_Service_loop(t *testing.T) {
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: 5,
Weight: -0.5,
DecayInterval: 50 * time.Millisecond,
},
BlockProviderScorerConfig: &scorers.BlockProviderScorerConfig{
@@ -264,3 +261,45 @@ func TestScorers_Service_loop(t *testing.T) {
assert.Equal(t, false, s1.IsBadPeer(pid1), "Peer should not be marked as bad")
assert.Equal(t, uint64(0), s2.ProcessedBlocks("peer1"), "No blocks are expected")
}
func TestScorers_Service_IsBadPeer(t *testing.T) {
peerStatuses := peers.NewStatus(context.Background(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: 2,
DecayInterval: 50 * time.Second,
},
},
})
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer("peer1"))
peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
peerStatuses.Scorers().BadResponsesScorer().Increment("peer1")
assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer("peer1"))
}
func TestScorers_Service_BadPeers(t *testing.T) {
peerStatuses := peers.NewStatus(context.Background(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: 2,
DecayInterval: 50 * time.Second,
},
},
})
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer("peer1"))
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer("peer2"))
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer("peer3"))
assert.Equal(t, 0, len(peerStatuses.Scorers().BadPeers()))
for _, pid := range []peer.ID{"peer1", "peer3"} {
peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
peerStatuses.Scorers().BadResponsesScorer().Increment(pid)
}
assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer("peer1"))
assert.Equal(t, false, peerStatuses.Scorers().IsBadPeer("peer2"))
assert.Equal(t, true, peerStatuses.Scorers().IsBadPeer("peer3"))
assert.Equal(t, 2, len(peerStatuses.Scorers().BadPeers()))
}

View File

@@ -32,6 +32,7 @@ import (
"github.com/libp2p/go-libp2p-core/network"
"github.com/libp2p/go-libp2p-core/peer"
ma "github.com/multiformats/go-multiaddr"
manet "github.com/multiformats/go-multiaddr/net"
"github.com/prysmaticlabs/go-bitfield"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers/peerdata"
@@ -52,14 +53,20 @@ const (
PeerConnecting
)
const (
// ColocationLimit restricts how many peer identities we can see from a single IP or IPv6 subnet.
ColocationLimit = 5
// Additional buffer beyond current peer limit, from which we can store the relevant peer statuses.
maxLimitBuffer = 150
)
// Status is the structure holding the peer status information.
type Status struct {
ctx context.Context
scorers *scorers.Service
store *peerdata.Store
ipTracker map[string]uint64
}
// StatusConfig represents peer status service params.
@@ -76,9 +83,10 @@ func NewStatus(ctx context.Context, config *StatusConfig) *Status {
MaxPeers: maxLimitBuffer + config.PeerLimit,
})
return &Status{
ctx: ctx,
store: store,
scorers: scorers.NewService(ctx, store, config.ScorerParams),
ipTracker: map[string]uint64{},
}
}
@@ -100,11 +108,15 @@ func (p *Status) Add(record *enr.Record, pid peer.ID, address ma.Multiaddr, dire
if peerData, ok := p.store.PeerData(pid); ok {
// Peer already exists, just update its address info.
prevAddress := peerData.Address
peerData.Address = address
peerData.Direction = direction
if record != nil {
peerData.Enr = record
}
if !sameIP(prevAddress, address) {
p.addIpToTracker(pid)
}
return
}
peerData := &peerdata.PeerData{
@@ -117,6 +129,7 @@ func (p *Status) Add(record *enr.Record, pid peer.ID, address ma.Multiaddr, dire
peerData.Enr = record
}
p.store.SetPeerData(pid, peerData)
p.addIpToTracker(pid)
}
// Address returns the multiaddress of the given remote peer.
@@ -156,25 +169,14 @@ func (p *Status) ENR(pid peer.ID) (*enr.Record, error) {
// SetChainState sets the chain state of the given remote peer.
func (p *Status) SetChainState(pid peer.ID, chainState *pb.Status) {
p.scorers.PeerStatusScorer().SetPeerStatus(pid, chainState, nil)
}
// ChainState gets the chain state of the given remote peer.
// This can return nil if there is no known chain state for the peer.
// This will error if the peer does not exist.
func (p *Status) ChainState(pid peer.ID) (*pb.Status, error) {
return p.scorers.PeerStatusScorer().PeerStatus(pid)
}
// IsActive checks if a peer is active and returns the result appropriately.
@@ -277,10 +279,10 @@ func (p *Status) ChainStateLastUpdated(pid peer.ID) (time.Time, error) {
return timeutils.Now(), peerdata.ErrPeerUnknown
}
// IsBad states if the peer is to be considered bad (by *any* of the registered scorers).
// If the peer is unknown this will return `false`, which makes using this function easier than returning an error.
func (p *Status) IsBad(pid peer.ID) bool {
return p.isfromBadIP(pid) || p.scorers.IsBadPeer(pid)
}
// NextValidTime gets the earliest possible time it is to contact/dial
@@ -463,6 +465,7 @@ func (p *Status) Prune() {
for _, peerData := range peersToPrune {
p.store.DeletePeerData(peerData.pid)
}
p.tallyIPTracker()
}
// BestFinalized returns the highest finalized epoch equal to or higher than ours that is agreed
@@ -579,6 +582,88 @@ func (p *Status) HighestEpoch() uint64 {
return helpers.SlotToEpoch(highestSlot)
}
func (p *Status) isfromBadIP(pid peer.ID) bool {
p.store.RLock()
defer p.store.RUnlock()
peerData, ok := p.store.PeerData(pid)
if !ok {
return false
}
if peerData.Address == nil {
return false
}
ip, err := manet.ToIP(peerData.Address)
if err != nil {
return true
}
if val, ok := p.ipTracker[ip.String()]; ok {
if val > ColocationLimit {
return true
}
}
return false
}
func (p *Status) addIpToTracker(pid peer.ID) {
data, ok := p.store.PeerData(pid)
if !ok {
return
}
if data.Address == nil {
return
}
ip, err := manet.ToIP(data.Address)
if err != nil {
// Should never happen; every incoming
// address is assumed to contain a valid IP.
return
}
// Ignore loopback addresses.
if ip.IsLoopback() {
return
}
stringIP := ip.String()
p.ipTracker[stringIP] += 1
}
func (p *Status) tallyIPTracker() {
tracker := map[string]uint64{}
// Iterate through all peers.
for _, peerData := range p.store.Peers() {
if peerData.Address == nil {
continue
}
ip, err := manet.ToIP(peerData.Address)
if err != nil {
// Should never happen; every stored
// address is assumed to contain a valid IP.
continue
}
stringIP := ip.String()
tracker[stringIP] += 1
}
p.ipTracker = tracker
}
func sameIP(firstAddr, secondAddr ma.Multiaddr) bool {
// Exit early if either multiaddress is nil.
if firstAddr == nil || secondAddr == nil {
return false
}
firstIP, err := manet.ToIP(firstAddr)
if err != nil {
return false
}
secondIP, err := manet.ToIP(secondAddr)
if err != nil {
return false
}
return firstIP.Equal(secondIP)
}
func retrieveIndicesFromBitfield(bitV bitfield.Bitvector64) []uint64 {
committeeIdxs := make([]uint64, 0, bitV.Count())
for i := uint64(0); i < 64; i++ {

View File
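The IP tracker above drives the colocation check: once more than ColocationLimit peer identities have been seen behind one IP, any further peer from that address is treated as bad. A minimal sketch of the counting logic, with the map and constant reproduced locally:

```go
package main

import "fmt"

const ColocationLimit = 5 // mirrors the constant above

func main() {
	ipTracker := map[string]uint64{}
	for i := 0; i < 6; i++ { // six peers dial in from the same address
		ipTracker["211.227.218.116"]++
	}
	fmt.Println(ipTracker["211.227.218.116"] > ColocationLimit) // true: the sixth peer trips the limit
}
```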

@@ -3,6 +3,7 @@ package peers_test
import (
"context"
"crypto/rand"
"strconv"
"testing"
"time"
@@ -517,6 +518,45 @@ func TestPrune(t *testing.T) {
assert.ErrorContains(t, "peer unknown", err)
}
func TestPeerIPTracker(t *testing.T) {
maxBadResponses := 2
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
PeerLimit: 30,
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: maxBadResponses,
},
},
})
badIP := "211.227.218.116"
badPeers := []peer.ID{}
for i := 0; i < peers.ColocationLimit+10; i++ {
port := strconv.Itoa(3000 + i)
addr, err := ma.NewMultiaddr("/ip4/" + badIP + "/tcp/" + port)
if err != nil {
t.Fatal(err)
}
badPeers = append(badPeers, createPeer(t, p, addr))
}
for _, pr := range badPeers {
assert.Equal(t, true, p.IsBad(pr), "peer with bad ip is not bad")
}
// Add in bad peers, so that our records are trimmed out
// from the peer store.
for i := 0; i < p.MaxPeerLimit()+100; i++ {
// Peer added to peer handler.
pid := addPeer(t, p, peers.PeerConnected)
p.Scorers().BadResponsesScorer().Increment(pid)
}
p.Prune()
for _, pr := range badPeers {
assert.Equal(t, false, p.IsBad(pr), "peer with good ip is regarded as bad")
}
}
func TestTrimmedOrderedPeers(t *testing.T) {
p := peers.NewStatus(context.Background(), &peers.StatusConfig{
PeerLimit: 30,
@@ -833,3 +873,15 @@ func addPeer(t *testing.T, p *peers.Status, state peerdata.PeerConnectionState)
})
return id
}
func createPeer(t *testing.T, p *peers.Status, addr ma.Multiaddr) peer.ID {
mhBytes := []byte{0x11, 0x04}
idBytes := make([]byte, 4)
_, err := rand.Read(idBytes)
require.NoError(t, err)
mhBytes = append(mhBytes, idBytes...)
id, err := peer.IDFromBytes(mhBytes)
require.NoError(t, err)
p.Add(new(enr.Record), id, addr, network.DirUnknown)
return id
}

View File

@@ -8,6 +8,7 @@ import (
"github.com/libp2p/go-libp2p-core/peer"
pubsub "github.com/libp2p/go-libp2p-pubsub"
pubsub_pb "github.com/libp2p/go-libp2p-pubsub/pb"
"github.com/prysmaticlabs/prysm/beacon-chain/flags"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
"github.com/prysmaticlabs/prysm/shared/hashutil"
"github.com/prysmaticlabs/prysm/shared/params"
@@ -53,7 +54,7 @@ func (s *Service) PublishToTopic(ctx context.Context, topic string, data []byte,
// Wait for at least 1 peer to be available to receive the published message.
for {
if len(topicHandle.ListPeers()) > 0 || flags.Get().MinimumSyncPeers == 0 {
return topicHandle.Publish(ctx, data, opts...)
}
select {
@@ -116,9 +117,19 @@ func msgIDFunction(pmsg *pubsub_pb.Message) string {
func setPubSubParameters() {
heartBeatInterval := 700 * time.Millisecond
pubsub.GossipSubDlo = 6
pubsub.GossipSubD = 8
pubsub.GossipSubHeartbeatInterval = heartBeatInterval
pubsub.GossipSubHistoryLength = 6
pubsub.GossipSubHistoryGossip = 3
pubsub.TimeCacheDuration = 550 * heartBeatInterval
// Set a larger gossip history to ensure that slower
// messages have a longer time to be propagated. This
// comes with the tradeoff of larger memory usage and
// size of the seen message cache.
if featureconfig.Get().EnableLargerGossipHistory {
pubsub.GossipSubHistoryLength = 12
pubsub.GossipSubHistoryGossip = 5
}
}
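To put rough numbers on the tradeoff described in that comment: gossip messages are retained for GossipSubHistoryLength heartbeats, so with the 700ms heartbeat the default window is about 4.2s and the enlarged one about 8.4s. A back-of-the-envelope sketch, not an exact cache bound:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	heartBeatInterval := 700 * time.Millisecond
	fmt.Println(6 * heartBeatInterval)  // 4.2s retained with the default history length
	fmt.Println(12 * heartBeatInterval) // 8.4s retained with EnableLargerGossipHistory
}
```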

View File

@@ -9,7 +9,6 @@ import (
"sync"
"time"
"github.com/dgraph-io/ristretto"
"github.com/ethereum/go-ethereum/p2p/enode"
"github.com/ethereum/go-ethereum/p2p/enr"
"github.com/gogo/protobuf/proto"
@@ -55,9 +54,6 @@ const lookupLimit = 15
// maxBadResponses is the maximum number of bad responses from a peer before we stop talking to it.
const maxBadResponses = 5
// Exclusion list cache config values.
const cacheNumCounters, cacheMaxCost, cacheBufferItems = 1000, 1000, 64
// maxDialTimeout is the timeout for a single peer dial.
var maxDialTimeout = params.BeaconNetworkConfig().RespTimeout
@@ -73,7 +69,6 @@ type Service struct {
addrFilter *filter.Filters
ipLimiter *leakybucket.Collector
privKey *ecdsa.PrivateKey
exclusionList *ristretto.Cache
metaData *pb.MetaData
pubsub *pubsub.PubSub
joinedTopics map[string]*pubsub.Topic
@@ -96,21 +91,12 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
var err error
ctx, cancel := context.WithCancel(ctx)
_ = cancel // govet fix for lost cancel. Cancel is handled in service.Stop().
cache, err := ristretto.NewCache(&ristretto.Config{
NumCounters: cacheNumCounters,
MaxCost: cacheMaxCost,
BufferItems: cacheBufferItems,
})
if err != nil {
return nil, err
}
s := &Service{
ctx: ctx,
stateNotifier: cfg.StateNotifier,
cancel: cancel,
cfg: cfg,
exclusionList: cache,
isPreGenesis: true,
joinedTopics: make(map[string]*pubsub.Topic, len(GossipTopicMappings)),
subnetsLock: make(map[uint64]*sync.RWMutex),
@@ -156,6 +142,7 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
pubsub.WithNoAuthor(),
pubsub.WithMessageIdFn(msgIDFunction),
pubsub.WithSubscriptionFilter(s),
pubsub.WithPeerOutboundQueueSize(256),
}
// Add gossip scoring options.
if featureconfig.Get().EnablePeerScorer {
@@ -179,7 +166,6 @@ func NewService(ctx context.Context, cfg *Config) (*Service, error) {
ScorerParams: &scorers.Config{
BadResponsesScorerConfig: &scorers.BadResponsesScorerConfig{
Threshold: maxBadResponses,
Weight: -100,
DecayInterval: time.Hour,
},
},
@@ -440,7 +426,7 @@ func (s *Service) connectWithPeer(ctx context.Context, info peer.AddrInfo) error
return nil
}
if s.Peers().IsBad(info.ID) {
return errors.New("refused to connect to bad peer")
}
ctx, cancel := context.WithTimeout(ctx, maxDialTimeout)
defer cancel()

View File

@@ -13,11 +13,14 @@ import (
"github.com/libp2p/go-libp2p"
"github.com/libp2p/go-libp2p-core/host"
"github.com/libp2p/go-libp2p-core/peer"
noise "github.com/libp2p/go-libp2p-noise"
"github.com/multiformats/go-multiaddr"
mock "github.com/prysmaticlabs/prysm/beacon-chain/blockchain/testing"
"github.com/prysmaticlabs/prysm/beacon-chain/core/feed"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/encoder"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p/peers/scorers"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/event"
"github.com/prysmaticlabs/prysm/shared/p2putils"
@@ -70,7 +73,7 @@ func createHost(t *testing.T, port int) (host.Host, *ecdsa.PrivateKey, net.IP) {
ipAddr := net.ParseIP("127.0.0.1")
listen, err := multiaddr.NewMultiaddr(fmt.Sprintf("/ip4/%s/tcp/%d", ipAddr, port))
require.NoError(t, err, "Failed to p2p listen")
h, err := libp2p.New(context.Background(), []libp2p.Option{privKeyOption(pkey), libp2p.ListenAddrs(listen)}...)
h, err := libp2p.New(context.Background(), []libp2p.Option{privKeyOption(pkey), libp2p.ListenAddrs(listen), libp2p.Security(noise.ID, noise.New)}...)
require.NoError(t, err)
return h, pkey, ipAddr
}
@@ -313,3 +316,48 @@ func initializeStateWithForkDigest(ctx context.Context, t *testing.T, ef *event.
return fd
}
func TestService_connectWithPeer(t *testing.T) {
tests := []struct {
name string
peers *peers.Status
info peer.AddrInfo
wantErr string
}{
{
name: "bad peer",
peers: func() *peers.Status {
ps := peers.NewStatus(context.Background(), &peers.StatusConfig{
ScorerParams: &scorers.Config{},
})
for i := 0; i < 10; i++ {
ps.Scorers().BadResponsesScorer().Increment("bad")
}
return ps
}(),
info: peer.AddrInfo{ID: "bad"},
wantErr: "refused to connect to bad peer",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
h, _, _ := createHost(t, 34567)
defer func() {
if err := h.Close(); err != nil {
t.Fatal(err)
}
}()
ctx := context.Background()
s := &Service{
host: h,
peers: tt.peers,
}
err := s.connectWithPeer(ctx, tt.info)
if len(tt.wantErr) > 0 {
require.ErrorContains(t, tt.wantErr, err)
} else {
require.NoError(t, err)
}
})
}
}

View File

@@ -18,6 +18,8 @@ import (
)
func TestStartDiscV5_DiscoverPeersWithSubnets(t *testing.T) {
// This test needs to be entirely rewritten and should be done in a follow up PR from #7885.
t.Skip("This test is now failing after PR 7885 due to false positive")
port := 2000
ipAddr, pkey := createAddrAndPrivKey(t)
genesisTime := time.Now()

View File

@@ -3,7 +3,11 @@ load("@prysm//tools/go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["types.go"],
srcs = [
"rpc_errors.go",
"rpc_goodbye_codes.go",
"types.go",
],
importpath = "github.com/prysmaticlabs/prysm/beacon-chain/p2p/types",
visibility = ["//beacon-chain:__subpackages__"],
deps = [

View File

@@ -0,0 +1,15 @@
package types
import "errors"
var (
ErrWrongForkDigestVersion = errors.New("wrong fork digest version")
ErrInvalidEpoch = errors.New("invalid epoch")
ErrInvalidFinalizedRoot = errors.New("invalid finalized root")
ErrInvalidSequenceNum = errors.New("invalid sequence number provided")
ErrGeneric = errors.New("internal service error")
ErrInvalidParent = errors.New("mismatched parent root")
ErrRateLimited = errors.New("rate limited")
ErrIODeadline = errors.New("i/o deadline exceeded")
ErrInvalidRequest = errors.New("invalid range, step or count")
)
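These sentinel errors are matched with errors.Is (as in the scorer's isBadPeer above), so a wrapped terminal error still marks the peer as bad. A small sketch with a locally declared stand-in for ErrWrongForkDigestVersion:

```go
package main

import (
	"errors"
	"fmt"
)

var ErrWrongForkDigestVersion = errors.New("wrong fork digest version")

func main() {
	// Wrapping with %w preserves the sentinel for errors.Is.
	validationErr := fmt.Errorf("chain state validation: %w", ErrWrongForkDigestVersion)
	fmt.Println(errors.Is(validationErr, ErrWrongForkDigestVersion)) // true -> peer is considered bad
}
```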

View File

@@ -0,0 +1,40 @@
package types
// RPCGoodbyeCode represents a goodbye code, used in the sync package.
type RPCGoodbyeCode = SSZUint64
const (
// Spec defined codes.
GoodbyeCodeClientShutdown RPCGoodbyeCode = iota
GoodbyeCodeWrongNetwork
GoodbyeCodeGenericError
// Teku-specific codes
GoodbyeCodeUnableToVerifyNetwork = RPCGoodbyeCode(128)
// Lighthouse-specific codes
GoodbyeCodeTooManyPeers = RPCGoodbyeCode(129)
GoodbyeCodeBadScore = RPCGoodbyeCode(250)
GoodbyeCodeBanned = RPCGoodbyeCode(251)
)
// GoodbyeCodeMessages defines a mapping between goodbye codes and string messages.
var GoodbyeCodeMessages = map[RPCGoodbyeCode]string{
GoodbyeCodeClientShutdown: "client shutdown",
GoodbyeCodeWrongNetwork: "irrelevant network",
GoodbyeCodeGenericError: "fault/error",
GoodbyeCodeUnableToVerifyNetwork: "unable to verify network",
GoodbyeCodeTooManyPeers: "client has too many peers",
GoodbyeCodeBadScore: "peer score too low",
GoodbyeCodeBanned: "client banned this node",
}
// ErrToGoodbyeCode converts a given error to an RPC goodbye code.
func ErrToGoodbyeCode(err error) RPCGoodbyeCode {
switch err {
case ErrWrongForkDigestVersion:
return GoodbyeCodeWrongNetwork
default:
return GoodbyeCodeGenericError
}
}
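A self-contained sketch of how the mapping is used when closing a connection. The type and sentinels are reproduced locally; the real code uses the SSZUint64 alias and the p2ptypes errors above:

```go
package main

import (
	"errors"
	"fmt"
)

type RPCGoodbyeCode uint64 // stand-in for the SSZUint64 alias above

const (
	GoodbyeCodeClientShutdown RPCGoodbyeCode = iota
	GoodbyeCodeWrongNetwork
	GoodbyeCodeGenericError
)

var ErrWrongForkDigestVersion = errors.New("wrong fork digest version")

func errToGoodbyeCode(err error) RPCGoodbyeCode {
	switch err {
	case ErrWrongForkDigestVersion:
		return GoodbyeCodeWrongNetwork
	default:
		return GoodbyeCodeGenericError
	}
}

func main() {
	fmt.Println(errToGoodbyeCode(ErrWrongForkDigestVersion))   // 1 ("irrelevant network")
	fmt.Println(errToGoodbyeCode(errors.New("anything else"))) // 2 ("fault/error")
}
```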

View File

@@ -45,12 +45,17 @@ type headerInfo struct {
Time uint64
}
func headerToHeaderInfo(hdr *gethTypes.Header) (*headerInfo, error) {
if hdr.Number == nil {
// A nil number will panic when calling *big.Int.Set(...)
return nil, errors.New("cannot convert block header with nil block number")
}
return &headerInfo{
Hash: hdr.Hash(),
Number: new(big.Int).Set(hdr.Number),
Time: hdr.Time,
}, nil
}
// hashKeyFn takes the hex string representation as the key for a headerInfo.
@@ -151,7 +156,10 @@ func (b *headerCache) AddHeader(hdr *gethTypes.Header) error {
b.lock.Lock()
defer b.lock.Unlock()
hInfo, err := headerToHeaderInfo(hdr)
if err != nil {
return err
}
if err := b.hashCache.AddIfNotPresent(hInfo); err != nil {
return err

View File

@@ -2,6 +2,7 @@ package powchain
import (
"math/big"
"reflect"
"testing"
"github.com/ethereum/go-ethereum/common"
@@ -102,3 +103,47 @@ func TestBlockCache_maxSize(t *testing.T) {
assert.Equal(t, int(maxCacheSize), len(cache.hashCache.ListKeys()))
assert.Equal(t, int(maxCacheSize), len(cache.heightCache.ListKeys()))
}
func Test_headerToHeaderInfo(t *testing.T) {
type args struct {
hdr *gethTypes.Header
}
tests := []struct {
name string
args args
want *headerInfo
wantErr bool
}{
{
name: "OK",
args: args{hdr: &gethTypes.Header{
Number: big.NewInt(500),
Time: 2345,
}},
want: &headerInfo{
Number: big.NewInt(500),
Hash: common.Hash{239, 10, 13, 71, 156, 192, 23, 93, 73, 154, 255, 209, 163, 204, 129, 12, 179, 183, 65, 70, 205, 200, 57, 12, 17, 211, 209, 4, 104, 133, 73, 86},
Time: 2345,
},
},
{
name: "nil number",
args: args{hdr: &gethTypes.Header{
Time: 2345,
}},
wantErr: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
got, err := headerToHeaderInfo(tt.args.hdr)
if (err != nil) != tt.wantErr {
t.Errorf("headerToHeaderInfo() error = %v, wantErr %v", err, tt.wantErr)
return
}
if !reflect.DeepEqual(got, tt.want) {
t.Errorf("headerToHeaderInfo() got = %v, want %v", got, tt.want)
}
})
}
}

View File

@@ -229,7 +229,10 @@ func (s *Service) retrieveHeaderInfo(ctx context.Context, bNum uint64) (*headerI
if err := s.headerCache.AddHeader(blk); err != nil {
return nil, err
}
info, err = headerToHeaderInfo(blk)
if err != nil {
return nil, err
}
}
return info, nil
}

View File

@@ -31,7 +31,7 @@ var (
const eth1LookBackPeriod = 100
const eth1DataSavingInterval = 100
const defaultEth1HeaderReqLimit = uint64(1000)
const depositlogRequestLimit = 10000
// Eth2GenesisPowchainInfo retrieves the genesis time and eth1 block number of the beacon chain
@@ -81,14 +81,7 @@ func (s *Service) ProcessLog(ctx context.Context, depositLog gethTypes.Log) erro
return errors.Wrap(err, "Could not process deposit log")
}
if s.lastReceivedMerkleIndex%eth1DataSavingInterval == 0 {
return s.savePowchainData(ctx)
}
return nil
}
@@ -231,6 +224,11 @@ func (s *Service) ProcessChainStart(genesisTime uint64, eth1BlockHash [32]byte,
StartTime: chainStartTime,
},
})
if err := s.savePowchainData(s.ctx); err != nil {
// Continue if the save fails, as the data will be
// re-saved in the next interval.
log.Error(err)
}
}
func (s *Service) createGenesisTime(timeStamp uint64) uint64 {
@@ -278,7 +276,7 @@ func (s *Service) processPastLogs(ctx context.Context) error {
}
for currentBlockNum < latestFollowHeight {
start := currentBlockNum
end := currentBlockNum + s.eth1HeaderReqLimit
// Appropriately bound the request, as we do not
// want to request blocks beyond the current follow distance.
if end > latestFollowHeight {
@@ -352,9 +350,9 @@ func (s *Service) processPastLogs(ctx context.Context) error {
return nil
}
// requestBatchedHeadersAndLogs requests and processes all the headers and
// logs from the period last polled to now.
func (s *Service) requestBatchedHeadersAndLogs(ctx context.Context) error {
// We request the nth block behind the current head in order to have
// stabilized logs when we retrieve them from the eth1 chain.
@@ -363,7 +361,12 @@ func (s *Service) requestBatchedLogs(ctx context.Context) error {
return err
}
for i := s.latestEth1Data.LastRequestedBlock + 1; i <= requestedBlock; i++ {
// Cache eth1 block header here.
_, err := s.BlockHashByHeight(ctx, big.NewInt(int64(i)))
if err != nil {
return err
}
err = s.ProcessETH1Block(ctx, big.NewInt(int64(i)))
if err != nil {
return err
}
@@ -490,3 +493,15 @@ func (s *Service) checkForChainstart(blockHash [32]byte, blockNumber *big.Int, b
s.ProcessChainStart(s.chainStartData.GenesisTime, blockHash, blockNumber)
}
}
// savePowchainData saves all powchain-related metadata to disk.
func (s *Service) savePowchainData(ctx context.Context) error {
eth1Data := &protodb.ETH1ChainData{
CurrentEth1Data: s.latestEth1Data,
ChainstartData: s.chainStartData,
BeaconState: s.preGenesisState.InnerStateUnsafe(), // I promise not to mutate it!
Trie: s.depositTrie.ToProto(),
DepositContainers: s.depositCache.AllDepositContainers(ctx),
}
return s.beaconDB.SavePowchainData(ctx, eth1Data)
}
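The start/end bounding in processPastLogs above chunks header requests by eth1HeaderReqLimit and clamps the final chunk to the follow height. A standalone sketch of that loop with assumed numbers:

```go
package main

import "fmt"

func main() {
	const eth1HeaderReqLimit = uint64(1000) // the default limit above
	currentBlockNum, latestFollowHeight := uint64(0), uint64(2500)
	for currentBlockNum < latestFollowHeight {
		start := currentBlockNum
		end := currentBlockNum + eth1HeaderReqLimit
		if end > latestFollowHeight {
			end = latestFollowHeight
		}
		fmt.Printf("request headers [%d, %d]\n", start, end) // [0,1000], [1000,2000], [2000,2500]
		currentBlockNum = end
	}
}
```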

View File

@@ -2,18 +2,14 @@ package powchain
import (
"io/ioutil"
"os"
"testing"
"github.com/sirupsen/logrus"
)
func TestMain(m *testing.M) {
logrus.SetLevel(logrus.DebugLevel)
logrus.SetOutput(ioutil.Discard)
m.Run()
}

View File

@@ -25,6 +25,7 @@ import (
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/cache/depositcache"
statefeed "github.com/prysmaticlabs/prysm/beacon-chain/core/feed/state"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
"github.com/prysmaticlabs/prysm/beacon-chain/core/state"
"github.com/prysmaticlabs/prysm/beacon-chain/db"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
@@ -148,16 +149,18 @@ type Service struct {
runError error
preGenesisState *stateTrie.BeaconState
stateGen *stategen.State
eth1HeaderReqLimit uint64
}
// Web3ServiceConfig defines a config struct for web3 service to use through its life cycle.
type Web3ServiceConfig struct {
HTTPEndPoint string
DepositContract common.Address
BeaconDB db.HeadAccessDatabase
DepositCache *depositcache.DepositCache
StateNotifier statefeed.Notifier
StateGen *stategen.State
Eth1HeaderReqLimit uint64
}
// NewService sets up a new instance with an ethclient when
@@ -175,6 +178,11 @@ func NewService(ctx context.Context, config *Web3ServiceConfig) (*Service, error
return nil, errors.Wrap(err, "could not setup genesis state")
}
eth1HeaderReqLimit := config.Eth1HeaderReqLimit
if eth1HeaderReqLimit == 0 {
eth1HeaderReqLimit = defaultEth1HeaderReqLimit
}
s := &Service{
ctx: ctx,
cancel: cancel,
@@ -200,6 +208,7 @@ func NewService(ctx context.Context, config *Web3ServiceConfig) (*Service, error
preGenesisState: genState,
headTicker: time.NewTicker(time.Duration(params.BeaconConfig().SecondsPerETH1Block) * time.Second),
stateGen: config.StateGen,
eth1HeaderReqLimit: eth1HeaderReqLimit,
}
eth1Data, err := config.BeaconDB.PowchainData(ctx)
@@ -348,22 +357,6 @@ func (s *Service) followBlockHeight(ctx context.Context) (uint64, error) {
if s.latestEth1Data.BlockHeight > params.BeaconConfig().Eth1FollowDistance {
latestValidBlock = s.latestEth1Data.BlockHeight - params.BeaconConfig().Eth1FollowDistance
}
blockTime, err := s.BlockTimeByHeight(ctx, big.NewInt(int64(latestValidBlock)))
if err != nil {
return 0, err
}
followTime := func(t uint64) uint64 {
return t + params.BeaconConfig().Eth1FollowDistance*params.BeaconConfig().SecondsPerETH1Block
}
for followTime(blockTime) > s.latestEth1Data.BlockTime && latestValidBlock > 0 {
// reduce block height to get eth1 block which
// fulfills stated condition
latestValidBlock--
blockTime, err = s.BlockTimeByHeight(ctx, big.NewInt(int64(latestValidBlock)))
if err != nil {
return 0, err
}
}
return latestValidBlock, nil
}
@@ -392,20 +385,38 @@ func (s *Service) dialETH1Nodes() (*ethclient.Client, *gethRPC.Client, error) {
return nil, nil, err
}
httpClient := ethclient.NewClient(httpRPCClient)
// Define a helper to clean up and close clients in the
// event of any connection failure.
closeClients := func() {
httpRPCClient.Close()
httpClient.Close()
}
syncProg, err := httpClient.SyncProgress(s.ctx)
if err != nil {
closeClients()
return nil, nil, err
}
if syncProg != nil {
closeClients()
return nil, nil, errors.New("eth1 node has not finished syncing yet")
}
// Make a simple call to ensure we are actually connected to a working node.
cID, err := httpClient.ChainID(s.ctx)
if err != nil {
closeClients()
return nil, nil, err
}
nID, err := httpClient.NetworkID(s.ctx)
if err != nil {
closeClients()
return nil, nil, err
}
if cID.Uint64() != params.BeaconNetworkConfig().ChainID {
closeClients()
return nil, nil, fmt.Errorf("eth1 node using incorrect chain id, %d != %d", cID.Uint64(), params.BeaconNetworkConfig().ChainID)
}
if nID.Uint64() != params.BeaconNetworkConfig().NetworkID {
closeClients()
return nil, nil, fmt.Errorf("eth1 node using incorrect network id, %d != %d", nID.Uint64(), params.BeaconNetworkConfig().NetworkID)
}
@@ -643,7 +654,7 @@ func (s *Service) handleETH1FollowDistance() {
log.Error("Beacon node is not respecting the follow distance")
return
}
if err := s.requestBatchedHeadersAndLogs(ctx); err != nil {
s.runError = err
log.Error(err)
return
@@ -681,7 +692,11 @@ func (s *Service) initPOWService() {
continue
}
// Cache eth1 headers from our voting period.
if err := s.cacheHeadersForEth1DataVote(ctx); err != nil {
log.Errorf("Unable to process past headers %v", err)
s.retryETH1Node(err)
continue
}
return
}
}
@@ -756,23 +771,46 @@ func (s *Service) logTillChainStart() {
// cacheHeadersForEth1DataVote makes sure that voting for eth1data after startup utilizes cached headers
// instead of making multiple RPC requests to the ETH1 endpoint.
func (s *Service) cacheHeadersForEth1DataVote(ctx context.Context) error {
// Find the end block to request from.
end, err := s.followBlockHeight(ctx)
if err != nil {
log.Errorf("Unable to fetch height of follow block: %v", err)
return err
}
start, err := s.determineEarliestVotingBlock(ctx, end)
if err != nil {
return err
}
// We call batchRequestHeaders for its header caching side-effect, so we don't need the return value.
_, err = s.batchRequestHeaders(start, end)
if err != nil {
log.Errorf("Unable to cache headers: %v", err)
return err
}
return nil
}
// determineEarliestVotingBlock determines the earliest voting block from which to start caching all previous headers.
func (s *Service) determineEarliestVotingBlock(ctx context.Context, followBlock uint64) (uint64, error) {
genesisTime := s.chainStartData.GenesisTime
currSlot := helpers.CurrentSlot(genesisTime)
// In the event genesis has not occurred yet, we just go back Eth1FollowDistance blocks.
if genesisTime == 0 || currSlot == 0 {
earliestBlk := uint64(0)
if followBlock > params.BeaconConfig().Eth1FollowDistance {
earliestBlk = followBlock - params.BeaconConfig().Eth1FollowDistance
}
return earliestBlk, nil
}
votingTime := helpers.VotingPeriodStartTime(genesisTime, currSlot)
followBackDist := 2 * params.BeaconConfig().SecondsPerETH1Block * params.BeaconConfig().Eth1FollowDistance
if followBackDist > votingTime {
return 0, errors.Errorf("invalid genesis time provided. %d > %d", followBackDist, votingTime)
}
earliestValidTime := votingTime - followBackDist
blkNum, err := s.BlockNumberByTimestamp(ctx, earliestValidTime)
if err != nil {
return 0, err
}
return blkNum.Uint64(), nil
}
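For a feel of determineEarliestVotingBlock's arithmetic, with assumed mainnet-style parameters (SecondsPerETH1Block = 14, Eth1FollowDistance = 1024) the look-back window is 2 * 14 * 1024 = 28672 seconds, roughly eight hours before the voting period start. A sketch with an assumed timestamp:

```go
package main

import "fmt"

func main() {
	const (
		secondsPerETH1Block = uint64(14)   // assumed config value
		eth1FollowDistance  = uint64(1024) // assumed config value
	)
	votingTime := uint64(1606824000) // assumed voting period start timestamp
	followBackDist := 2 * secondsPerETH1Block * eth1FollowDistance
	fmt.Println(followBackDist)              // 28672 seconds (~8 hours)
	fmt.Println(votingTime - followBackDist) // 1606795328: earliest timestamp whose block gets cached
}
```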

View File

@@ -452,3 +452,78 @@ func TestInitDepositCache_OK(t *testing.T) {
require.NoError(t, s.initDepositCaches(context.Background(), ctrs))
require.Equal(t, 3, len(s.depositCache.PendingContainers(context.Background(), nil)))
}
func TestNewService_EarliestVotingBlock(t *testing.T) {
testAcc, err := contracts.Setup()
require.NoError(t, err, "Unable to set up simulated backend")
beaconDB, _ := dbutil.SetupDB(t)
web3Service, err := NewService(context.Background(), &Web3ServiceConfig{
HTTPEndPoint: endpoint,
DepositContract: testAcc.ContractAddr,
BeaconDB: beaconDB,
})
require.NoError(t, err, "unable to setup web3 ETH1.0 chain service")
web3Service.eth1DataFetcher = &goodFetcher{backend: testAcc.Backend}
// The simulated backend sets the eth1 block
// time to 10 seconds.
conf := params.BeaconConfig()
conf.SecondsPerETH1Block = 10
conf.Eth1FollowDistance = 50
params.OverrideBeaconConfig(conf)
defer func() {
params.UseMainnetConfig()
}()
// Genesis not set
followBlock := uint64(2000)
blk, err := web3Service.determineEarliestVotingBlock(context.Background(), followBlock)
require.NoError(t, err)
assert.Equal(t, followBlock-conf.Eth1FollowDistance, blk, "unexpected earliest voting block")
// Genesis is set.
numToForward := 1500
// forward 1500 blocks
for i := 0; i < numToForward; i++ {
testAcc.Backend.Commit()
}
currTime := testAcc.Backend.Blockchain().CurrentHeader().Time
now := time.Now()
err = testAcc.Backend.AdjustTime(now.Sub(time.Unix(int64(currTime), 0)))
require.NoError(t, err)
testAcc.Backend.Commit()
currTime = testAcc.Backend.Blockchain().CurrentHeader().Time
web3Service.latestEth1Data.BlockHeight = testAcc.Backend.Blockchain().CurrentHeader().Number.Uint64()
web3Service.latestEth1Data.BlockTime = testAcc.Backend.Blockchain().CurrentHeader().Time
web3Service.chainStartData.GenesisTime = currTime
// With a current slot of zero, only request blocks Eth1FollowDistance behind the follow block.
blk, err = web3Service.determineEarliestVotingBlock(context.Background(), followBlock)
require.NoError(t, err)
assert.Equal(t, followBlock-conf.Eth1FollowDistance, blk, "unexpected earliest voting block")
}
func TestNewService_Eth1HeaderRequLimit(t *testing.T) {
testAcc, err := contracts.Setup()
require.NoError(t, err, "Unable to set up simulated backend")
beaconDB, _ := dbutil.SetupDB(t)
s1, err := NewService(context.Background(), &Web3ServiceConfig{
HTTPEndPoint: endpoint,
DepositContract: testAcc.ContractAddr,
BeaconDB: beaconDB,
})
require.NoError(t, err, "unable to setup web3 ETH1.0 chain service")
assert.Equal(t, defaultEth1HeaderReqLimit, s1.eth1HeaderReqLimit, "default eth1 header request limit not set")
s2, err := NewService(context.Background(), &Web3ServiceConfig{
HTTPEndPoint: endpoint,
DepositContract: testAcc.ContractAddr,
BeaconDB: beaconDB,
Eth1HeaderReqLimit: uint64(150),
})
require.NoError(t, err, "unable to setup web3 ETH1.0 chain service")
assert.Equal(t, uint64(150), s2.eth1HeaderReqLimit, "unable to set eth1HeaderRequestLimit")
}

View File

@@ -265,7 +265,7 @@ func (bs *Server) StreamIndexedAttestations(
}
if data.Attestation == nil || data.Attestation.Aggregate == nil {
// One nil attestation shouldn't stop the stream.
log.Info("Indexed attestations stream got nil attestation or nil attestation aggregate")
log.Debug("Indexed attestations stream got nil attestation or nil attestation aggregate")
continue
}
bs.ReceivedAttestationsBuffer <- data.Attestation.Aggregate
@@ -340,7 +340,7 @@ func (bs *Server) collectReceivedAttestations(ctx context.Context) {
// We aggregate the received attestations, we know they all have the same data root.
aggAtts, err := attaggregation.Aggregate(atts)
if err != nil {
log.WithError(err).Error("Could not aggregate collected attestations")
log.WithError(err).Error("Could not aggregate attestations")
continue
}
if len(aggAtts) == 0 {
@@ -356,7 +356,7 @@ func (bs *Server) collectReceivedAttestations(ctx context.Context) {
case att := <-bs.ReceivedAttestationsBuffer:
attDataRoot, err := att.Data.HashTreeRoot()
if err != nil {
log.Errorf("Could not hash tree root data: %v", err)
log.Errorf("Could not hash tree root attestation data: %v", err)
continue
}
attsByRoot[attDataRoot] = append(attsByRoot[attDataRoot], att)

View File

@@ -624,6 +624,13 @@ func TestServer_ListIndexedAttestations_GenesisEpoch(t *testing.T) {
})
require.NoError(t, err)
assert.Equal(t, len(indexedAtts), len(res.IndexedAttestations), "Incorrect indexed attestations length")
sort.Slice(indexedAtts, func(i, j int) bool {
return indexedAtts[i].Data.Slot < indexedAtts[j].Data.Slot
})
sort.Slice(res.IndexedAttestations, func(i, j int) bool {
return res.IndexedAttestations[i].Data.Slot < res.IndexedAttestations[j].Data.Slot
})
assert.DeepEqual(t, indexedAtts, res.IndexedAttestations, "Incorrect list indexed attestations response")
}

View File

@@ -1,7 +1,6 @@
package beacon
import (
"os"
"testing"
"github.com/prysmaticlabs/prysm/beacon-chain/flags"
@@ -9,21 +8,18 @@ import (
)
func TestMain(m *testing.M) {
// Use minimal config to reduce test setup time.
prevConfig := params.BeaconConfig().Copy()
defer params.OverrideBeaconConfig(prevConfig)
params.OverrideBeaconConfig(params.MinimalSpecConfig())
resetFlags := flags.Get()
flags.Init(&flags.GlobalFlags{
MinimumSyncPeers: 30,
})
defer func() {
flags.Init(resetFlags)
}()
m.Run()
}

View File

@@ -38,7 +38,7 @@ func (bs *Server) ListBlocks(
case *ethpb.ListBlocksRequest_Epoch:
blks, _, err := bs.BeaconDB.Blocks(ctx, filters.NewFilter().SetStartEpoch(q.Epoch).SetEndEpoch(q.Epoch))
if err != nil {
return nil, status.Errorf(codes.Internal, "Failed to get blocks: %v", err)
return nil, status.Errorf(codes.Internal, "Could not get blocks: %v", err)
}
numBlks := len(blks)
@@ -194,12 +194,12 @@ func (bs *Server) StreamBlocks(_ *ptypes.Empty, stream ethpb.BeaconChain_StreamB
}
headState, err := bs.HeadFetcher.HeadState(bs.Ctx)
if err != nil {
log.WithError(err).WithField("blockSlot", data.SignedBlock.Block.Slot).Warn("Could not get head state to verify block signature")
log.WithError(err).WithField("blockSlot", data.SignedBlock.Block.Slot).Error("Could not get head state")
continue
}
if err := blocks.VerifyBlockSignature(headState, data.SignedBlock); err != nil {
log.WithError(err).WithField("blockSlot", data.SignedBlock.Block.Slot).Warn("Could not verify block signature")
log.WithError(err).WithField("blockSlot", data.SignedBlock.Block.Slot).Error("Could not verify block signature")
continue
}
if err := stream.Send(data.SignedBlock); err != nil {

View File

@@ -73,7 +73,7 @@ func (bs *Server) retrieveCommitteesForEpoch(
}
requestedState, err := bs.StateGen.StateBySlot(ctx, startSlot)
if err != nil {
return nil, nil, status.Errorf(codes.Internal, "Could not get state: %v", err)
}
seed, err := helpers.Seed(requestedState, epoch, params.BeaconConfig().DomainBeaconAttester)
if err != nil {

View File

@@ -82,7 +82,7 @@ func TestServer_ListBeaconCommittees_PreviousEpoch(t *testing.T) {
mixes[i] = make([]byte, 32)
}
require.NoError(t, headState.SetRandaoMixes(mixes))
require.NoError(t, headState.SetSlot(params.BeaconConfig().SlotsPerEpoch))
b := testutil.NewBeaconBlock()
require.NoError(t, db.SaveBlock(ctx, b))

View File

@@ -358,7 +358,7 @@ func (is *infostream) generatePendingValidatorInfo(info *ethpb.ValidatorInfo) (*
if deposit.block != nil {
info.Status = ethpb.ValidatorStatus_DEPOSITED
if queueTimestamp, err := is.depositQueueTimestamp(deposit.block); err != nil {
log.WithError(err).Error("Failed to obtain queue activation timestamp")
log.WithError(err).Error("Could not obtain queue activation timestamp")
} else {
info.TransitionTimestamp = queueTimestamp
}
@@ -415,7 +415,7 @@ func (is *infostream) calculateActivationTimeForPendingValidators(res []*ethpb.V
for curEpoch := epoch + 1; len(sortedIndices) > 0 && len(pendingValidators) > 0; curEpoch++ {
toProcess, err := helpers.ValidatorChurnLimit(numAttestingValidators)
if err != nil {
log.WithError(err).Error("Failed to determine validator churn limit")
log.WithError(err).Error("Could not determine validator churn limit")
}
if toProcess > uint64(len(sortedIndices)) {
toProcess = uint64(len(sortedIndices))
@@ -456,7 +456,7 @@ func (is *infostream) handleBlockProcessed() {
is.currentEpoch = blockEpoch
if err := is.sendValidatorsInfo(is.pubKeys); err != nil {
// Client probably disconnected.
log.WithError(err).Debug("Failed to send infostream response")
log.WithError(err).Debug("Could not send infostream response")
}
}

View File

@@ -958,7 +958,7 @@ func TestServer_ListValidators_FromOldEpoch(t *testing.T) {
}
st := testutil.NewBeaconState()
-	require.NoError(t, st.SetSlot(30*params.BeaconConfig().SlotsPerEpoch))
+	require.NoError(t, st.SetSlot(20*params.BeaconConfig().SlotsPerEpoch))
require.NoError(t, st.SetValidators(validators))
b := testutil.NewBeaconBlock()
require.NoError(t, db.SaveBlock(ctx, b))

View File

@@ -18,7 +18,7 @@ func (bs *Server) ListValidators(ctx context.Context, req *ethpb.StateValidators
}
// ListValidatorBalances returns a filterable list of validator balances.
-func (bs *Server) ListValidatorBalances(ctx context.Context, req *ethpb.StateValidatorsRequest) (*ethpb.ValidatorBalancesResponse, error) {
+func (bs *Server) ListValidatorBalances(ctx context.Context, req *ethpb.ValidatorBalancesRequest) (*ethpb.ValidatorBalancesResponse, error) {
return nil, errors.New("unimplemented")
}

View File

@@ -11,6 +11,7 @@ go_library(
"//beacon-chain/db:go_default_library",
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/sync:go_default_library",
"//proto/beacon/rpc/v1:go_default_library",
"//shared/version:go_default_library",
"@com_github_gogo_protobuf//types:go_default_library",
"@com_github_libp2p_go_libp2p_core//network:go_default_library",
@@ -32,6 +33,7 @@ go_test(
"//beacon-chain/p2p:go_default_library",
"//beacon-chain/p2p/testing:go_default_library",
"//beacon-chain/sync/initial-sync/testing:go_default_library",
"//proto/beacon/rpc/v1:go_default_library",
"//shared/bytesutil:go_default_library",
"//shared/testutil:go_default_library",
"//shared/testutil/assert:go_default_library",

View File

@@ -17,6 +17,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/db"
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
"github.com/prysmaticlabs/prysm/beacon-chain/sync"
pbrpc "github.com/prysmaticlabs/prysm/proto/beacon/rpc/v1"
"github.com/prysmaticlabs/prysm/shared/version"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
@@ -27,13 +28,15 @@ import (
// providing RPC endpoints for verifying a beacon node's sync status, genesis and
// version information, and services the node implements and runs.
type Server struct {
-	SyncChecker        sync.Checker
-	Server             *grpc.Server
-	BeaconDB           db.ReadOnlyDatabase
-	PeersFetcher       p2p.PeersProvider
-	PeerManager        p2p.PeerManager
-	GenesisTimeFetcher blockchain.TimeFetcher
-	GenesisFetcher     blockchain.GenesisFetcher
+	SyncChecker          sync.Checker
+	Server               *grpc.Server
+	BeaconDB             db.ReadOnlyDatabase
+	PeersFetcher         p2p.PeersProvider
+	PeerManager          p2p.PeerManager
+	GenesisTimeFetcher   blockchain.TimeFetcher
+	GenesisFetcher       blockchain.GenesisFetcher
+	BeaconMonitoringHost string
+	BeaconMonitoringPort int
}
// GetSyncStatus checks the current network sync status of the node.
@@ -117,6 +120,13 @@ func (ns *Server) GetHost(_ context.Context, _ *ptypes.Empty) (*ethpb.HostData,
}, nil
}
// GetLogsEndpoint returns the address (host:port) at which the beacon node exposes its logs.
func (ns *Server) GetLogsEndpoint(_ context.Context, _ *ptypes.Empty) (*pbrpc.LogsEndpointResponse, error) {
return &pbrpc.LogsEndpointResponse{
BeaconLogsEndpoint: fmt.Sprintf("%s:%d", ns.BeaconMonitoringHost, ns.BeaconMonitoringPort),
}, nil
}
// GetPeer returns the data known about the peer defined by the provided peer id.
func (ns *Server) GetPeer(_ context.Context, peerReq *ethpb.PeerRequest) (*ethpb.Peer, error) {
pid, err := peer.Decode(peerReq.PeerId)
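One small note on the new endpoint: the response is plain host:port formatting of the monitoring flags. A standalone sketch with hypothetical values; net.JoinHostPort would be the more defensive choice if an IPv6 host could ever be configured:

```go
package main

import (
	"fmt"
	"net"
	"strconv"
)

func main() {
	host, port := "localhost", 8080 // hypothetical monitoring flag values
	fmt.Printf("%s:%d\n", host, port)                       // localhost:8080, as GetLogsEndpoint formats it
	fmt.Println(net.JoinHostPort(host, strconv.Itoa(port))) // same result, but IPv6-safe
}
```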

View File

@@ -15,6 +15,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/p2p"
mockP2p "github.com/prysmaticlabs/prysm/beacon-chain/p2p/testing"
mockSync "github.com/prysmaticlabs/prysm/beacon-chain/sync/initial-sync/testing"
pbrpc "github.com/prysmaticlabs/prysm/proto/beacon/rpc/v1"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
@@ -77,6 +78,19 @@ func TestNodeServer_GetVersion(t *testing.T) {
assert.Equal(t, v, res.Version)
}
func TestNodeServer_GetLogsEndpoint(t *testing.T) {
ns := &Server{
BeaconMonitoringHost: "localhost",
BeaconMonitoringPort: 8080,
}
res, err := ns.GetLogsEndpoint(context.Background(), &ptypes.Empty{})
require.NoError(t, err)
want := &pbrpc.LogsEndpointResponse{
BeaconLogsEndpoint: "localhost:8080",
}
assert.DeepEqual(t, want, res)
}
func TestNodeServer_GetImplementedServices(t *testing.T) {
server := grpc.NewServer()
ns := &Server{

View File

@@ -4,6 +4,7 @@ package rpc
import (
"context"
"errors"
"fmt"
"net"
"sync"
@@ -77,6 +78,8 @@ type Service struct {
syncService chainSync.Checker
host string
port string
beaconMonitoringHost string
beaconMonitoringPort int
listener net.Listener
withCert string
withKey string
@@ -104,6 +107,8 @@ type Config struct {
Port string
CertFlag string
KeyFlag string
BeaconMonitoringHost string
BeaconMonitoringPort int
BeaconDB db.HeadAccessDatabase
ChainInfoFetcher blockchain.ChainInfoFetcher
HeadFetcher blockchain.HeadFetcher
@@ -161,6 +166,8 @@ func NewService(ctx context.Context, cfg *Config) *Service {
syncService: cfg.SyncService,
host: cfg.Host,
port: cfg.Port,
beaconMonitoringHost: cfg.BeaconMonitoringHost,
beaconMonitoringPort: cfg.BeaconMonitoringPort,
withCert: cfg.CertFlag,
withKey: cfg.KeyFlag,
depositFetcher: cfg.DepositFetcher,
@@ -249,13 +256,15 @@ func (s *Service) Start() {
StateGen: s.stateGen,
}
nodeServer := &node.Server{
-		BeaconDB:           s.beaconDB,
-		Server:             s.grpcServer,
-		SyncChecker:        s.syncService,
-		GenesisTimeFetcher: s.genesisTimeFetcher,
-		PeersFetcher:       s.peersFetcher,
-		PeerManager:        s.peerManager,
-		GenesisFetcher:     s.genesisFetcher,
+		BeaconDB:             s.beaconDB,
+		Server:               s.grpcServer,
+		SyncChecker:          s.syncService,
+		GenesisTimeFetcher:   s.genesisTimeFetcher,
+		PeersFetcher:         s.peersFetcher,
+		PeerManager:          s.peerManager,
+		GenesisFetcher:       s.genesisFetcher,
+		BeaconMonitoringHost: s.beaconMonitoringHost,
+		BeaconMonitoringPort: s.beaconMonitoringPort,
}
beaconChainServer := &beacon.Server{
Ctx: s.ctx,
@@ -297,6 +306,7 @@ func (s *Service) Start() {
SyncChecker: s.syncService,
}
ethpb.RegisterNodeServer(s.grpcServer, nodeServer)
pbrpc.RegisterHealthServer(s.grpcServer, nodeServer)
ethpb.RegisterBeaconChainServer(s.grpcServer, beaconChainServer)
ethpbv1.RegisterBeaconChainServer(s.grpcServer, beaconChainServerV1)
if s.enableDebugRPCEndpoints {
@@ -337,6 +347,9 @@ func (s *Service) Stop() error {
// Status returns nil or credentialError
func (s *Service) Status() error {
if s.syncService.Syncing() {
return errors.New("syncing")
}
if s.credentialError != nil {
return s.credentialError
}

View File

@@ -45,7 +45,7 @@ func TestLifecycle_OK(t *testing.T) {
func TestStatus_CredentialError(t *testing.T) {
credentialErr := errors.New("credentialError")
-	s := &Service{credentialError: credentialErr}
+	s := &Service{credentialError: credentialErr, syncService: &mockSync.Sync{IsSyncing: false}}
assert.ErrorContains(t, s.credentialError.Error(), s.Status())
}

View File

@@ -68,7 +68,7 @@ func (vs *Server) SubmitAggregateSelectionProof(ctx context.Context, req *ethpb.
if len(aggregatedAtts) == 0 {
aggregatedAtts = vs.AttPool.UnaggregatedAttestationsBySlotIndex(req.Slot, req.CommitteeIndex)
if len(aggregatedAtts) == 0 {
-			return nil, status.Errorf(codes.Internal, "Could not find attestation for slot and committee in pool")
+			return nil, status.Errorf(codes.NotFound, "Could not find attestation for slot and committee in pool")
}
}
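Switching from codes.Internal to codes.NotFound is caller-visible: a gRPC client can treat the empty-pool case as transient rather than as a server fault. A hedged client-side sketch (retryable and the sample messages are illustrative, not Prysm code):

```go
package main

import (
	"errors"
	"fmt"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// retryable reports whether a gRPC error is the NotFound case introduced
// above (no matching attestation in the pool yet), which a caller may
// choose to retry instead of surfacing as a failure.
func retryable(err error) bool {
	return status.Code(err) == codes.NotFound
}

func main() {
	err := status.Error(codes.NotFound, "Could not find attestation for slot and committee in pool")
	fmt.Println(retryable(err))                // true
	fmt.Println(retryable(errors.New("boom"))) // false: plain errors map to codes.Unknown
}
```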

View File

@@ -39,6 +39,9 @@ func (vs *Server) StreamDuties(req *ethpb.DutiesRequest, stream ethpb.BeaconNode
// If we are post-genesis time, then set the current epoch to
// the number of epochs since the genesis time, otherwise 0 by default.
genesisTime := vs.GenesisTimeFetcher.GenesisTime()
if genesisTime.IsZero() {
return status.Error(codes.Unavailable, "genesis time is not set")
}
var currentEpoch uint64
if genesisTime.Before(timeutils.Now()) {
currentEpoch = slotutil.EpochsSinceGenesis(vs.GenesisTimeFetcher.GenesisTime())
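The added guard leans on time.Time's zero value standing in for "genesis not yet known". A minimal illustration:

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	var genesisTime time.Time         // zero value: genesis time not set yet
	fmt.Println(genesisTime.IsZero()) // true -> the RPC returns codes.Unavailable
	fmt.Println(time.Now().IsZero())  // false -> duty computation proceeds
}
```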

View File

@@ -64,7 +64,7 @@ func (vs *Server) GetAttestationData(ctx context.Context, req *ethpb.Attestation
}
defer func() {
if err := vs.AttestationCache.MarkNotInProgress(req); err != nil {
log.WithError(err).Error("Failed to mark cache not in progress")
log.WithError(err).Error("Could not mark cache not in progress")
}
}()
@@ -89,7 +89,7 @@ func (vs *Server) GetAttestationData(ctx context.Context, req *ethpb.Attestation
}
}
if headState == nil {
return nil, status.Error(codes.Internal, "Failed to lookup parent state from head.")
return nil, status.Error(codes.Internal, "Could not lookup parent state from head.")
}
if helpers.CurrentEpoch(headState) < helpers.SlotToEpoch(req.Slot) {
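The deferred MarkNotInProgress call illustrates a deliberate pattern: cleanup failures are logged, not returned, so they cannot clobber the RPC's real result. A small standalone sketch (markInProgress/markNotInProgress are hypothetical stand-ins):

```go
package main

import (
	"errors"
	"log"
)

func markInProgress()          {}
func markNotInProgress() error { return errors.New("cache closed") }

// fetch logs the deferred cleanup failure instead of returning it, so the
// value computed by the function body is what the caller actually sees.
func fetch() (string, error) {
	markInProgress()
	defer func() {
		if err := markNotInProgress(); err != nil {
			log.Println("Could not mark cache not in progress:", err)
		}
	}()
	return "attestation data", nil
}

func main() {
	data, err := fetch()
	log.Println(data, err) // "attestation data <nil>" despite the cleanup error
}
```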

View File

@@ -192,12 +192,12 @@ func (vs *Server) eth1Data(ctx context.Context, slot uint64) (*ethpb.Eth1Data, e
// Look up most recent block up to timestamp
blockNumber, err := vs.Eth1BlockFetcher.BlockNumberByTimestamp(ctx, eth1VotingPeriodStartTime)
if err != nil {
log.WithError(err).Error("Failed to get block number from timestamp")
log.WithError(err).Error("Could not get block number from timestamp")
return vs.randomETH1DataVote(ctx)
}
eth1Data, err := vs.defaultEth1DataResponse(ctx, blockNumber)
if err != nil {
log.WithError(err).Error("Failed to get eth1 data from block number")
log.WithError(err).Error("Could not get eth1 data from block number")
return vs.randomETH1DataVote(ctx)
}
@@ -237,12 +237,12 @@ func (vs *Server) eth1DataMajorityVote(ctx context.Context, beaconState *stateTr
lastBlockByEarliestValidTime, err := vs.Eth1BlockFetcher.BlockNumberByTimestamp(ctx, earliestValidTime)
if err != nil {
log.WithError(err).Error("Failed to get last block by earliest valid time")
log.WithError(err).Error("Could not get last block by earliest valid time")
return vs.randomETH1DataVote(ctx)
}
timeOfLastBlockByEarliestValidTime, err := vs.Eth1BlockFetcher.BlockTimeByHeight(ctx, lastBlockByEarliestValidTime)
if err != nil {
log.WithError(err).Error("Failed to get time of last block by earliest valid time")
log.WithError(err).Error("Could not get time of last block by earliest valid time")
return vs.randomETH1DataVote(ctx)
}
// Increment the earliest block if the original block's time is before valid time.
@@ -253,12 +253,12 @@ func (vs *Server) eth1DataMajorityVote(ctx context.Context, beaconState *stateTr
lastBlockByLatestValidTime, err := vs.Eth1BlockFetcher.BlockNumberByTimestamp(ctx, latestValidTime)
if err != nil {
log.WithError(err).Error("Failed to get last block by latest valid time")
log.WithError(err).Error("Could not get last block by latest valid time")
return vs.randomETH1DataVote(ctx)
}
timeOfLastBlockByLatestValidTime, err := vs.Eth1BlockFetcher.BlockTimeByHeight(ctx, lastBlockByLatestValidTime)
if err != nil {
log.WithError(err).Error("Failed to get time of last block by latest valid time")
log.WithError(err).Error("Could not get time of last block by latest valid time")
return vs.randomETH1DataVote(ctx)
}
if timeOfLastBlockByLatestValidTime < earliestValidTime {
@@ -278,7 +278,7 @@ func (vs *Server) eth1DataMajorityVote(ctx context.Context, beaconState *stateTr
if lastBlockDepositCount >= vs.HeadFetcher.HeadETH1Data().DepositCount {
hash, err := vs.Eth1BlockFetcher.BlockHashByHeight(ctx, lastBlockByLatestValidTime)
if err != nil {
log.WithError(err).Error("Failed to get hash of last block by latest valid time")
log.WithError(err).Error("Could not get hash of last block by latest valid time")
return vs.randomETH1DataVote(ctx)
}
return &ethpb.Eth1Data{
@@ -507,7 +507,7 @@ func (vs *Server) canonicalEth1Data(
// Add in current vote, to get accurate vote tally
if err := beaconState.AppendEth1DataVotes(currentVote); err != nil {
return nil, nil, errors.Wrap(err, "failed to append eth1 data votes to state")
return nil, nil, errors.Wrap(err, "could not append eth1 data votes to state")
}
hasSupport, err := blocks.Eth1DataHasEnoughSupport(beaconState, currentVote)
if err != nil {
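The errors.Wrap rewording above only changes the display message; with github.com/pkg/errors (the library these call sites use), the wrapped cause is preserved, so sentinel comparisons behave the same. A small sketch (errDB is a hypothetical sentinel):

```go
package main

import (
	"fmt"

	"github.com/pkg/errors"
)

var errDB = errors.New("db closed") // hypothetical sentinel error

func appendVotes() error {
	return errors.Wrap(errDB, "could not append eth1 data votes to state")
}

func main() {
	err := appendVotes()
	fmt.Println(err)                        // could not append eth1 data votes to state: db closed
	fmt.Println(errors.Cause(err) == errDB) // true: only the message changed, not the cause
}
```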

View File

@@ -164,8 +164,9 @@ func (vs *Server) WaitForChainStart(_ *ptypes.Empty, stream ethpb.BeaconNodeVali
}
if head != nil {
res := &ethpb.ChainStartResponse{
-			Started:     true,
-			GenesisTime: head.GenesisTime(),
+			Started:               true,
+			GenesisTime:           head.GenesisTime(),
+			GenesisValidatorsRoot: head.GenesisValidatorRoot(),
}
return stream.Send(res)
}
@@ -176,71 +177,17 @@ func (vs *Server) WaitForChainStart(_ *ptypes.Empty, stream ethpb.BeaconNodeVali
for {
select {
case event := <-stateChannel:
if event.Type == statefeed.ChainStarted {
data, ok := event.Data.(*statefeed.ChainStartedData)
if !ok {
return errors.New("event data is not type *statefeed.ChainStartData")
}
log.WithField("starttime", data.StartTime).Debug("Received chain started event")
log.Debug("Sending genesis time notification to connected validator clients")
res := &ethpb.ChainStartResponse{
Started: true,
GenesisTime: uint64(data.StartTime.Unix()),
}
return stream.Send(res)
}
// Handle race condition in the event the blockchain
// service isn't initialized in time and the saved head state is nil.
if event.Type == statefeed.Initialized {
data, ok := event.Data.(*statefeed.InitializedData)
if !ok {
return errors.New("event data is not type *statefeed.InitializedData")
}
res := &ethpb.ChainStartResponse{
Started: true,
GenesisTime: uint64(data.StartTime.Unix()),
}
return stream.Send(res)
}
case <-stateSub.Err():
return status.Error(codes.Aborted, "Subscriber closed, exiting goroutine")
case <-vs.Ctx.Done():
return status.Error(codes.Canceled, "Context canceled")
}
}
}
// WaitForSynced subscribes to the state channel and ends the stream when the state channel
// indicates the beacon node has been initialized and is ready
func (vs *Server) WaitForSynced(_ *ptypes.Empty, stream ethpb.BeaconNodeValidator_WaitForSyncedServer) error {
head, err := vs.HeadFetcher.HeadState(stream.Context())
if err != nil {
return status.Errorf(codes.Internal, "Could not retrieve head state: %v", err)
}
if head != nil && !vs.SyncChecker.Syncing() {
res := &ethpb.SyncedResponse{
Synced: true,
GenesisTime: head.GenesisTime(),
}
return stream.Send(res)
}
stateChannel := make(chan *feed.Event, 1)
stateSub := vs.StateNotifier.StateFeed().Subscribe(stateChannel)
defer stateSub.Unsubscribe()
for {
select {
case event := <-stateChannel:
if event.Type == statefeed.Synced {
data, ok := event.Data.(*statefeed.SyncedData)
if !ok {
return errors.New("event data is not type *statefeed.SyncedData")
}
log.WithField("starttime", data.StartTime).Debug("Received sync completed event")
log.WithField("starttime", data.StartTime).Debug("Received chain started event")
log.Debug("Sending genesis time notification to connected validator clients")
-				res := &ethpb.SyncedResponse{
-					Synced:      true,
-					GenesisTime: uint64(data.StartTime.Unix()),
+				res := &ethpb.ChainStartResponse{
+					Started:               true,
+					GenesisTime:           uint64(data.StartTime.Unix()),
+					GenesisValidatorsRoot: data.GenesisValidatorsRoot,
}
return stream.Send(res)
}
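For orientation, the new field means a validator client receives the genesis validators root, which it needs to compute signing domains, in the same chain-start notification instead of via a separate call. A standalone mirror of the response shape (chainStartResponse is a stand-in for the real ethpb.ChainStartResponse):

```go
package main

import "fmt"

// chainStartResponse mirrors only the field names sent above; the real
// message type is ethpb.ChainStartResponse.
type chainStartResponse struct {
	Started               bool
	GenesisTime           uint64
	GenesisValidatorsRoot []byte
}

func main() {
	res := chainStartResponse{
		Started:               true,
		GenesisTime:           1606824023, // hypothetical genesis timestamp
		GenesisValidatorsRoot: make([]byte, 32),
	}
	fmt.Printf("started=%v genesis=%d root=%x\n", res.Started, res.GenesisTime, res.GenesisValidatorsRoot)
}
```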

View File

@@ -17,7 +17,6 @@ import (
dbutil "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
mockPOW "github.com/prysmaticlabs/prysm/beacon-chain/powchain/testing"
stateTrie "github.com/prysmaticlabs/prysm/beacon-chain/state"
mockSync "github.com/prysmaticlabs/prysm/beacon-chain/sync/initial-sync/testing"
pbp2p "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/bls"
"github.com/prysmaticlabs/prysm/shared/bytesutil"
@@ -321,8 +320,10 @@ func TestWaitForChainStart_AlreadyStarted(t *testing.T) {
require.NoError(t, trie.SetSlot(3))
require.NoError(t, db.SaveState(ctx, trie, headBlockRoot))
require.NoError(t, db.SaveHeadBlockRoot(ctx, headBlockRoot))
genesisValidatorsRoot := bytesutil.ToBytes32([]byte("validators"))
require.NoError(t, trie.SetGenesisValidatorRoot(genesisValidatorsRoot[:]))
-	chainService := &mockChain.ChainService{State: trie}
+	chainService := &mockChain.ChainService{State: trie, ValidatorsRoot: genesisValidatorsRoot}
Server := &Server{
Ctx: context.Background(),
ChainStartFetcher: &mockPOW.POWChain{
@@ -337,8 +338,9 @@ func TestWaitForChainStart_AlreadyStarted(t *testing.T) {
mockStream := mock.NewMockBeaconNodeValidator_WaitForChainStartServer(ctrl)
mockStream.EXPECT().Send(
&ethpb.ChainStartResponse{
-			Started:     true,
-			GenesisTime: uint64(time.Unix(0, 0).Unix()),
+			Started:               true,
+			GenesisTime:           uint64(time.Unix(0, 0).Unix()),
+			GenesisValidatorsRoot: genesisValidatorsRoot[:],
},
).Return(nil)
mockStream.EXPECT().Context().Return(context.Background())
@@ -347,7 +349,7 @@ func TestWaitForChainStart_AlreadyStarted(t *testing.T) {
func TestWaitForChainStart_HeadStateDoesNotExist(t *testing.T) {
db, _ := dbutil.SetupDB(t)
-	genesisValidatorRoot := [32]byte{0x01, 0x02}
+	genesisValidatorRoot := params.BeaconConfig().ZeroHash
// Set head state to nil
chainService := &mockChain.ChainService{State: nil}
@@ -386,8 +388,9 @@ func TestWaitForChainStart_HeadStateDoesNotExist(t *testing.T) {
func TestWaitForChainStart_NotStartedThenLogFired(t *testing.T) {
db, _ := dbutil.SetupDB(t)
hook := logTest.NewGlobal()
genesisValidatorsRoot := bytesutil.ToBytes32([]byte("validators"))
chainService := &mockChain.ChainService{}
Server := &Server{
Ctx: context.Background(),
@@ -404,8 +407,9 @@ func TestWaitForChainStart_NotStartedThenLogFired(t *testing.T) {
mockStream := mock.NewMockBeaconNodeValidator_WaitForChainStartServer(ctrl)
mockStream.EXPECT().Send(
&ethpb.ChainStartResponse{
-			Started:     true,
-			GenesisTime: uint64(time.Unix(0, 0).Unix()),
+			Started:               true,
+			GenesisTime:           uint64(time.Unix(0, 0).Unix()),
+			GenesisValidatorsRoot: genesisValidatorsRoot[:],
},
).Return(nil)
mockStream.EXPECT().Context().Return(context.Background())
@@ -417,114 +421,10 @@ func TestWaitForChainStart_NotStartedThenLogFired(t *testing.T) {
// Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed).
for sent := 0; sent == 0; {
sent = Server.StateNotifier.StateFeed().Send(&feed.Event{
-			Type: statefeed.ChainStarted,
-			Data: &statefeed.ChainStartedData{
-				StartTime: time.Unix(0, 0),
-			},
})
}
exitRoutine <- true
require.LogsContain(t, hook, "Sending genesis time")
}
func TestWaitForSynced_ContextClosed(t *testing.T) {
db, _ := dbutil.SetupDB(t)
ctx, cancel := context.WithCancel(context.Background())
chainService := &mockChain.ChainService{}
Server := &Server{
Ctx: ctx,
ChainStartFetcher: &mockPOW.FaultyMockPOWChain{
ChainFeed: new(event.Feed),
},
StateNotifier: chainService.StateNotifier(),
BeaconDB: db,
HeadFetcher: chainService,
}
exitRoutine := make(chan bool)
ctrl := gomock.NewController(t)
defer ctrl.Finish()
mockStream := mock.NewMockBeaconNodeValidator_WaitForSyncedServer(ctrl)
mockStream.EXPECT().Context().Return(context.Background())
go func(tt *testing.T) {
err := Server.WaitForSynced(&ptypes.Empty{}, mockStream)
assert.ErrorContains(tt, "Context canceled", err)
<-exitRoutine
}(t)
cancel()
exitRoutine <- true
}
func TestWaitForSynced_AlreadySynced(t *testing.T) {
db, _ := dbutil.SetupDB(t)
ctx := context.Background()
headBlockRoot := [32]byte{0x01, 0x02}
trie := testutil.NewBeaconState()
require.NoError(t, trie.SetSlot(3))
require.NoError(t, db.SaveState(ctx, trie, headBlockRoot))
require.NoError(t, db.SaveHeadBlockRoot(ctx, headBlockRoot))
chainService := &mockChain.ChainService{State: trie}
Server := &Server{
Ctx: context.Background(),
ChainStartFetcher: &mockPOW.POWChain{
ChainFeed: new(event.Feed),
},
BeaconDB: db,
StateNotifier: chainService.StateNotifier(),
HeadFetcher: chainService,
SyncChecker: &mockSync.Sync{IsSyncing: false},
}
ctrl := gomock.NewController(t)
defer ctrl.Finish()
mockStream := mock.NewMockBeaconNodeValidator_WaitForSyncedServer(ctrl)
mockStream.EXPECT().Send(
&ethpb.SyncedResponse{
Synced: true,
GenesisTime: uint64(time.Unix(0, 0).Unix()),
},
).Return(nil)
mockStream.EXPECT().Context().Return(context.Background())
assert.NoError(t, Server.WaitForSynced(&ptypes.Empty{}, mockStream), "Could not call RPC method")
}
func TestWaitForSynced_NotStartedThenLogFired(t *testing.T) {
db, _ := dbutil.SetupDB(t)
hook := logTest.NewGlobal()
chainService := &mockChain.ChainService{}
Server := &Server{
Ctx: context.Background(),
ChainStartFetcher: &mockPOW.FaultyMockPOWChain{
ChainFeed: new(event.Feed),
},
BeaconDB: db,
StateNotifier: chainService.StateNotifier(),
HeadFetcher: chainService,
}
exitRoutine := make(chan bool)
ctrl := gomock.NewController(t)
defer ctrl.Finish()
mockStream := mock.NewMockBeaconNodeValidator_WaitForSyncedServer(ctrl)
mockStream.EXPECT().Send(
&ethpb.SyncedResponse{
Synced: true,
GenesisTime: uint64(time.Unix(0, 0).Unix()),
},
).Return(nil)
mockStream.EXPECT().Context().Return(context.Background())
go func(tt *testing.T) {
assert.NoError(tt, Server.WaitForSynced(&ptypes.Empty{}, mockStream), "Could not call RPC method")
<-exitRoutine
}(t)
// Send in a loop to ensure it is delivered (busy wait for the service to subscribe to the state feed).
for sent := 0; sent == 0; {
sent = Server.StateNotifier.StateFeed().Send(&feed.Event{
-			Type: statefeed.Synced,
-			Data: &statefeed.SyncedData{
-				StartTime: time.Unix(0, 0),
+			Type: statefeed.Initialized,
+			Data: &statefeed.InitializedData{
+				StartTime:             time.Unix(0, 0),
+				GenesisValidatorsRoot: genesisValidatorsRoot[:],
},
})
}

View File

@@ -3,8 +3,6 @@ package validator
import (
"context"
"errors"
"math/big"
"time"
ethpb "github.com/prysmaticlabs/ethereumapis/eth/v1alpha1"
"github.com/prysmaticlabs/prysm/beacon-chain/core/helpers"
@@ -182,11 +180,6 @@ func (vs *Server) validatorStatus(
resp.Status = depositStatus(deposit.Data.Amount)
resp.Eth1DepositBlockNumber = eth1BlockNumBigInt.Uint64()
-		depositBlockSlot, err := vs.depositBlockSlot(ctx, headState, eth1BlockNumBigInt)
-		if err != nil {
-			return resp, nonExistentIndex
-		}
-		resp.DepositInclusionSlot = depositBlockSlot
return resp, nonExistentIndex
// Deposited, Pending or Partially Deposited mean the validator has been put into the state.
case ethpb.ValidatorStatus_DEPOSITED, ethpb.ValidatorStatus_PENDING, ethpb.ValidatorStatus_PARTIALLY_DEPOSITED:
@@ -265,30 +258,6 @@ func assignmentStatus(beaconState *stateTrie.BeaconState, validatorIdx uint64) e
return ethpb.ValidatorStatus_EXITED
}
func (vs *Server) depositBlockSlot(ctx context.Context, beaconState *stateTrie.BeaconState, eth1BlockNumBigInt *big.Int) (uint64, error) {
var depositBlockSlot uint64
blockTimeStamp, err := vs.BlockFetcher.BlockTimeByHeight(ctx, eth1BlockNumBigInt)
if err != nil {
return 0, err
}
followTime := time.Duration(params.BeaconConfig().Eth1FollowDistance*params.BeaconConfig().SecondsPerETH1Block) * time.Second
eth1UnixTime := time.Unix(int64(blockTimeStamp), 0).Add(followTime)
period := params.BeaconConfig().SlotsPerEpoch * params.BeaconConfig().EpochsPerEth1VotingPeriod
votingPeriod := time.Duration(period*params.BeaconConfig().SecondsPerSlot) * time.Second
timeToInclusion := eth1UnixTime.Add(votingPeriod)
eth2Genesis := time.Unix(int64(beaconState.GenesisTime()), 0)
if eth2Genesis.After(timeToInclusion) {
depositBlockSlot = 0
} else {
eth2TimeDifference := timeToInclusion.Sub(eth2Genesis).Seconds()
depositBlockSlot = uint64(eth2TimeDifference) / params.BeaconConfig().SecondsPerSlot
}
return depositBlockSlot, nil
}
func depositStatus(depositOrBalance uint64) ethpb.ValidatorStatus {
if depositOrBalance == 0 {
return ethpb.ValidatorStatus_PENDING

View File

@@ -2,7 +2,6 @@ package validator
import (
"context"
"math/big"
"testing"
"time"
@@ -709,131 +708,6 @@ func TestValidatorStatus_CorrectActivationQueue(t *testing.T) {
assert.Equal(t, uint64(2), resp.PositionInActivationQueue, "Unexpected position in activation queue")
}
func TestDepositBlockSlotAfterGenesisTime(t *testing.T) {
db, _ := dbutil.SetupDB(t)
ctx := context.Background()
pubKey := pubKey(1)
depData := &ethpb.Deposit_Data{
PublicKey: pubKey,
Signature: bytesutil.PadTo([]byte("hi"), 96),
WithdrawalCredentials: bytesutil.PadTo([]byte("hey"), 32),
}
deposit := &ethpb.Deposit{
Data: depData,
}
depositTrie, err := trieutil.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
require.NoError(t, err, "Could not setup deposit trie")
depositCache, err := depositcache.New()
require.NoError(t, err)
depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, depositTrie.Root())
timestamp := time.Unix(int64(params.BeaconConfig().Eth1FollowDistance), 0).Unix()
p := &mockPOW.POWChain{
TimesByHeight: map[int]uint64{
int(params.BeaconConfig().Eth1FollowDistance): uint64(timestamp),
},
}
block := testutil.NewBeaconBlock()
require.NoError(t, db.SaveBlock(ctx, block), "Could not save genesis block")
genesisRoot, err := block.Block.HashTreeRoot()
require.NoError(t, err, "Could not get signing root")
activeEpoch := helpers.ActivationExitEpoch(0)
state, err := stateTrie.InitializeFromProtoUnsafe(&pbp2p.BeaconState{
GenesisTime: uint64(time.Unix(0, 0).Unix()),
Slot: 10000,
Validators: []*ethpb.Validator{{
ActivationEpoch: activeEpoch,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
PublicKey: pubKey},
}},
)
require.NoError(t, err)
vs := &Server{
BeaconDB: db,
ChainStartFetcher: p,
BlockFetcher: p,
Eth1InfoFetcher: p,
DepositFetcher: depositCache,
HeadFetcher: &mockChain.ChainService{State: state, Root: genesisRoot[:]},
}
eth1BlockNumBigInt := big.NewInt(1000000)
resp, err := vs.depositBlockSlot(context.Background(), state, eth1BlockNumBigInt)
require.NoError(t, err, "Could not get the deposit block slot")
assert.Equal(t, uint64(69), resp)
}
func TestDepositBlockSlotBeforeGenesisTime(t *testing.T) {
db, _ := dbutil.SetupDB(t)
ctx := context.Background()
pubKey := pubKey(1)
depData := &ethpb.Deposit_Data{
PublicKey: pubKey,
Signature: bytesutil.PadTo([]byte("hi"), 96),
WithdrawalCredentials: bytesutil.PadTo([]byte("hey"), 32),
}
deposit := &ethpb.Deposit{
Data: depData,
}
depositTrie, err := trieutil.NewTrie(params.BeaconConfig().DepositContractTreeDepth)
require.NoError(t, err, "Could not setup deposit trie")
depositCache, err := depositcache.New()
require.NoError(t, err)
depositCache.InsertDeposit(ctx, deposit, 0 /*blockNum*/, 0, depositTrie.Root())
timestamp := time.Unix(int64(params.BeaconConfig().Eth1FollowDistance), 0).Unix()
p := &mockPOW.POWChain{
TimesByHeight: map[int]uint64{
int(params.BeaconConfig().Eth1FollowDistance): uint64(timestamp),
},
}
block := testutil.NewBeaconBlock()
require.NoError(t, db.SaveBlock(ctx, block), "Could not save genesis block")
genesisRoot, err := block.Block.HashTreeRoot()
require.NoError(t, err, "Could not get signing root")
activeEpoch := helpers.ActivationExitEpoch(0)
state, err := stateTrie.InitializeFromProtoUnsafe(&pbp2p.BeaconState{
GenesisTime: uint64(time.Unix(25000, 0).Unix()),
Slot: 10000,
Validators: []*ethpb.Validator{{
ActivationEpoch: activeEpoch,
ExitEpoch: params.BeaconConfig().FarFutureEpoch,
PublicKey: pubKey},
}},
)
require.NoError(t, err)
vs := &Server{
BeaconDB: db,
ChainStartFetcher: p,
BlockFetcher: p,
Eth1InfoFetcher: p,
DepositFetcher: depositCache,
HeadFetcher: &mockChain.ChainService{State: state, Root: genesisRoot[:]},
}
eth1BlockNumBigInt := big.NewInt(1000000)
resp, err := vs.depositBlockSlot(context.Background(), state, eth1BlockNumBigInt)
require.NoError(t, err, "Could not get the deposit block slot")
assert.Equal(t, uint64(0), resp)
}
func TestMultipleValidatorStatus_Pubkeys(t *testing.T) {
db, _ := dbutil.SetupDB(t)
ctx := context.Background()
@@ -915,9 +789,8 @@ func TestMultipleValidatorStatus_Pubkeys(t *testing.T) {
Status: ethpb.ValidatorStatus_ACTIVE,
},
{
-				Status:               ethpb.ValidatorStatus_DEPOSITED,
-				DepositInclusionSlot: 69,
-				ActivationEpoch:      18446744073709551615,
+				Status:          ethpb.ValidatorStatus_DEPOSITED,
+				ActivationEpoch: 18446744073709551615,
},
{
Status: ethpb.ValidatorStatus_DEPOSITED,

View File

@@ -2,7 +2,6 @@ package validator
import (
"io/ioutil"
"os"
"testing"
"github.com/prysmaticlabs/prysm/shared/params"
@@ -10,15 +9,12 @@ import (
)
func TestMain(m *testing.M) {
-	run := func() int {
-		logrus.SetLevel(logrus.DebugLevel)
-		logrus.SetOutput(ioutil.Discard)
-		// Use minimal config to reduce test setup time.
-		prevConfig := params.BeaconConfig().Copy()
-		defer params.OverrideBeaconConfig(prevConfig)
-		params.OverrideBeaconConfig(params.MinimalSpecConfig())
+	logrus.SetLevel(logrus.DebugLevel)
+	logrus.SetOutput(ioutil.Discard)
+	// Use minimal config to reduce test setup time.
+	prevConfig := params.BeaconConfig().Copy()
+	defer params.OverrideBeaconConfig(prevConfig)
+	params.OverrideBeaconConfig(params.MinimalSpecConfig())
-		return m.Run()
-	}
-	os.Exit(run())
+	m.Run()
}

View File

@@ -2,7 +2,6 @@ package stategen
import "errors"
-var errUnknownStateSummary = errors.New("unknown state summary")
var errUnknownBoundaryState = errors.New("unknown boundary state")
var errUnknownState = errors.New("unknown state")
var errUnknownBlock = errors.New("unknown block")

View File

@@ -131,13 +131,13 @@ func (s *State) stateSummary(ctx context.Context, blockRoot [32]byte) (*pb.State
}
}
if summary == nil {
-		return s.recoverStateSummary(ctx, blockRoot)
+		return s.RecoverStateSummary(ctx, blockRoot)
}
return summary, nil
}
-// This recovers state summary object of a given block root by using the saved block in DB.
-func (s *State) recoverStateSummary(ctx context.Context, blockRoot [32]byte) (*pb.StateSummary, error) {
+// RecoverStateSummary recovers state summary object of a given block root by using the saved block in DB.
+func (s *State) RecoverStateSummary(ctx context.Context, blockRoot [32]byte) (*pb.StateSummary, error) {
if s.beaconDB.HasBlock(ctx, blockRoot) {
b, err := s.beaconDB.Block(ctx, blockRoot)
if err != nil {
@@ -149,7 +149,7 @@ func (s *State) recoverStateSummary(ctx context.Context, blockRoot [32]byte) (*p
}
return summary, nil
}
-	return nil, errUnknownStateSummary
+	return nil, errors.New("could not find block in DB")
}
// This loads a beacon state from either the cache or DB then replays blocks up to the requested block root.
@@ -193,6 +193,11 @@ func (s *State) loadStateByRoot(ctx context.Context, blockRoot [32]byte) (*state
return nil, errUnknownBoundaryState
}
// Return state early if we are retrieving it from our finalized state cache.
if startState.Slot() == targetSlot {
return startState, nil
}
blks, err := s.LoadBlocks(ctx, startState.Slot()+1, targetSlot, bytesutil.ToBytes32(summary.Root))
if err != nil {
return nil, errors.Wrap(err, "could not load blocks for hot state using root")
@@ -225,6 +230,13 @@ func (s *State) loadStateBySlot(ctx context.Context, slot uint64) (*state.Beacon
return nil, errors.Wrap(err, "could not get last valid block for hot state using slot")
}
-	// This is an impossible scenario.
+	// In the event where current state slot is greater or equal to last valid block slot,
+	// we should just process state up to input slot.
+	if startState.Slot() >= lastValidSlot {
+		return processSlotsStateGen(ctx, startState, slot)
+	}
// Load and replay blocks to get the intermediate state.
replayBlks, err := s.LoadBlocks(ctx, startState.Slot()+1, lastValidSlot, lastValidRoot)
if err != nil {
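A condensed sketch of the lookup order stateSummary now follows: in-memory cache first, then the DB's saved summaries, then recovery from the saved block. All type names below are stand-ins, not the real Prysm types:

```go
package main

import (
	"errors"
	"fmt"
)

type summary struct {
	Slot uint64
	Root [32]byte
}

// store stands in for the stategen service's cache and database.
type store struct {
	cache     map[[32]byte]*summary
	summaries map[[32]byte]*summary
	blockSlot map[[32]byte]uint64 // saved blocks, keyed by root
}

func (s *store) stateSummary(root [32]byte) (*summary, error) {
	if sm, ok := s.cache[root]; ok {
		return sm, nil
	}
	if sm, ok := s.summaries[root]; ok {
		return sm, nil
	}
	// RecoverStateSummary path: rebuild the summary from the block in DB.
	if slot, ok := s.blockSlot[root]; ok {
		return &summary{Slot: slot, Root: root}, nil
	}
	return nil, errors.New("could not find block in DB")
}

func main() {
	s := &store{
		cache:     map[[32]byte]*summary{},
		summaries: map[[32]byte]*summary{},
		blockSlot: map[[32]byte]uint64{[32]byte{'a'}: 100},
	}
	sm, err := s.stateSummary([32]byte{'a'})
	fmt.Println(sm.Slot, err) // 100 <nil>
	_, err = s.stateSummary([32]byte{'b'})
	fmt.Println(err) // could not find block in DB
}
```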

View File

@@ -174,10 +174,18 @@ func TestStateBySlot_ColdState(t *testing.T) {
service.slotsPerArchivedPoint = params.BeaconConfig().SlotsPerEpoch * 2
service.finalizedInfo.slot = service.slotsPerArchivedPoint + 1
-	beaconState, _ := testutil.DeterministicGenesisState(t, 32)
-	require.NoError(t, beaconState.SetSlot(1))
-	b := testutil.NewBeaconBlock()
-	b.Block.Slot = 1
+	beaconState, pks := testutil.DeterministicGenesisState(t, 32)
+	genesisStateRoot, err := beaconState.HashTreeRoot(ctx)
+	require.NoError(t, err)
+	genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
+	assert.NoError(t, db.SaveBlock(ctx, genesis))
+	gRoot, err := genesis.Block.HashTreeRoot()
+	require.NoError(t, err)
+	assert.NoError(t, db.SaveState(ctx, beaconState, gRoot))
+	assert.NoError(t, db.SaveGenesisBlockRoot(ctx, gRoot))
+	b, err := testutil.GenerateFullBlock(beaconState, pks, testutil.DefaultBlockGenConfig(), 1)
+	require.NoError(t, err)
	require.NoError(t, db.SaveBlock(ctx, b))
bRoot, err := b.Block.HashTreeRoot()
require.NoError(t, err)
@@ -200,12 +208,14 @@ func TestStateBySlot_HotStateDB(t *testing.T) {
service := New(db, cache.NewStateSummaryCache())
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
-	b := testutil.NewBeaconBlock()
-	require.NoError(t, db.SaveBlock(ctx, b))
-	bRoot, err := b.Block.HashTreeRoot()
+	genesisStateRoot, err := beaconState.HashTreeRoot(ctx)
	require.NoError(t, err)
-	require.NoError(t, db.SaveState(ctx, beaconState, bRoot))
-	require.NoError(t, db.SaveGenesisBlockRoot(ctx, bRoot))
+	genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
+	assert.NoError(t, db.SaveBlock(ctx, genesis))
+	gRoot, err := genesis.Block.HashTreeRoot()
+	require.NoError(t, err)
+	assert.NoError(t, db.SaveState(ctx, beaconState, gRoot))
+	assert.NoError(t, db.SaveGenesisBlockRoot(ctx, gRoot))
slot := uint64(10)
loadedState, err := service.StateBySlot(ctx, slot)
@@ -222,7 +232,7 @@ func TestStateSummary_CanGetFromCacheOrDB(t *testing.T) {
r := [32]byte{'a'}
summary := &pb.StateSummary{Slot: 100}
_, err := service.stateSummary(ctx, r)
-	require.ErrorContains(t, errUnknownStateSummary.Error(), err)
+	require.ErrorContains(t, "could not find block in DB", err)
service.stateSummaryCache.Put(r, summary)
got, err := service.stateSummary(ctx, r)
@@ -234,7 +244,7 @@ func TestStateSummary_CanGetFromCacheOrDB(t *testing.T) {
r = [32]byte{'b'}
summary = &pb.StateSummary{Root: r[:], Slot: 101}
_, err = service.stateSummary(ctx, r)
-	require.ErrorContains(t, errUnknownStateSummary.Error(), err)
+	require.ErrorContains(t, "could not find block in DB", err)
require.NoError(t, service.beaconDB.SaveStateSummary(ctx, summary))
got, err = service.stateSummary(ctx, r)
@@ -262,6 +272,33 @@ func TestLoadeStateByRoot_Cached(t *testing.T) {
}
}
func TestLoadeStateByRoot_FinalizedState(t *testing.T) {
ctx := context.Background()
db, _ := testDB.SetupDB(t)
service := New(db, cache.NewStateSummaryCache())
beaconState, _ := testutil.DeterministicGenesisState(t, 32)
genesisStateRoot, err := beaconState.HashTreeRoot(ctx)
require.NoError(t, err)
genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
assert.NoError(t, db.SaveBlock(ctx, genesis))
gRoot, err := genesis.Block.HashTreeRoot()
require.NoError(t, err)
require.NoError(t, service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Slot: 0, Root: gRoot[:]}))
service.finalizedInfo.state = beaconState
service.finalizedInfo.slot = beaconState.Slot()
service.finalizedInfo.root = gRoot
// This tests where hot state was already cached.
loadedState, err := service.loadStateByRoot(ctx, gRoot)
require.NoError(t, err)
if !proto.Equal(loadedState.InnerStateUnsafe(), beaconState.InnerStateUnsafe()) {
t.Error("Did not correctly retrieve finalized state")
}
}
func TestLoadeStateByRoot_EpochBoundaryStateCanProcess(t *testing.T) {
ctx := context.Background()
db, ssc := testDB.SetupDB(t)

View File

@@ -107,10 +107,9 @@ func (s *State) MigrateToCold(ctx context.Context, fRoot [32]byte) error {
}
// Migrate all state summary objects from state summary cache to DB.
-	if err := s.beaconDB.SaveStateSummaries(ctx, s.stateSummaryCache.GetAll()); err != nil {
+	if err := s.SaveStateSummariesToDB(ctx); err != nil {
		return err
	}
-	s.stateSummaryCache.Clear()
// Update finalized info in memory.
fInfo, ok, err := s.epochBoundaryStateCache.getByRoot(fRoot)

View File

@@ -5,6 +5,7 @@ import (
"testing"
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
"github.com/prysmaticlabs/prysm/beacon-chain/core/blocks"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/testutil"
@@ -67,29 +68,42 @@ func TestMigrateToCold_RegeneratePath(t *testing.T) {
service := New(db, cache.NewStateSummaryCache())
service.slotsPerArchivedPoint = 1
-	beaconState, _ := testutil.DeterministicGenesisState(t, 32)
-	stateSlot := uint64(1)
-	require.NoError(t, beaconState.SetSlot(stateSlot))
-	blk := testutil.NewBeaconBlock()
-	blk.Block.Slot = 2
-	fRoot, err := blk.Block.HashTreeRoot()
+	beaconState, pks := testutil.DeterministicGenesisState(t, 32)
+	genesisStateRoot, err := beaconState.HashTreeRoot(ctx)
	require.NoError(t, err)
-	require.NoError(t, service.beaconDB.SaveBlock(ctx, blk))
-	require.NoError(t, service.beaconDB.SaveGenesisBlockRoot(ctx, fRoot))
-	require.NoError(t, service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Slot: 1, Root: fRoot[:]}))
+	genesis := blocks.NewGenesisBlock(genesisStateRoot[:])
+	assert.NoError(t, db.SaveBlock(ctx, genesis))
+	gRoot, err := genesis.Block.HashTreeRoot()
+	require.NoError(t, err)
+	assert.NoError(t, db.SaveState(ctx, beaconState, gRoot))
+	assert.NoError(t, db.SaveGenesisBlockRoot(ctx, gRoot))
+	b1, err := testutil.GenerateFullBlock(beaconState, pks, testutil.DefaultBlockGenConfig(), 1)
+	require.NoError(t, err)
+	r1, err := b1.Block.HashTreeRoot()
+	require.NoError(t, err)
+	require.NoError(t, service.beaconDB.SaveBlock(ctx, b1))
+	require.NoError(t, service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Slot: 1, Root: r1[:]}))
+	b4, err := testutil.GenerateFullBlock(beaconState, pks, testutil.DefaultBlockGenConfig(), 4)
+	require.NoError(t, err)
+	r4, err := b4.Block.HashTreeRoot()
+	require.NoError(t, err)
+	require.NoError(t, service.beaconDB.SaveBlock(ctx, b4))
+	require.NoError(t, service.beaconDB.SaveStateSummary(ctx, &pb.StateSummary{Slot: 4, Root: r4[:]}))
service.finalizedInfo = &finalizedInfo{
-		slot: 1,
-		root: fRoot,
+		slot:  0,
+		root:  genesisStateRoot,
state: beaconState,
}
-	require.NoError(t, service.MigrateToCold(ctx, fRoot))
+	require.NoError(t, service.MigrateToCold(ctx, r4))
-	gotState, err := service.beaconDB.State(ctx, fRoot)
+	s1, err := service.beaconDB.State(ctx, r1)
require.NoError(t, err)
-	assert.DeepEqual(t, beaconState.InnerStateUnsafe(), gotState.InnerStateUnsafe(), "Did not save state")
-	gotRoot := service.beaconDB.ArchivedPointRoot(ctx, stateSlot/service.slotsPerArchivedPoint)
-	assert.Equal(t, fRoot, gotRoot, "Did not save archived root")
+	assert.Equal(t, s1.Slot(), uint64(1), "Did not save state")
+	gotRoot := service.beaconDB.ArchivedPointRoot(ctx, 1/service.slotsPerArchivedPoint)
+	assert.Equal(t, r1, gotRoot, "Did not save archived root")
lastIndex, err := service.beaconDB.LastArchivedSlot(ctx)
require.NoError(t, err)
assert.Equal(t, uint64(1), lastIndex, "Did not save last archived index")

View File

@@ -54,9 +54,8 @@ func (s *State) ReplayBlocks(ctx context.Context, state *stateTrie.BeaconState,
// The Blocks are returned in slot-descending order.
func (s *State) LoadBlocks(ctx context.Context, startSlot, endSlot uint64, endBlockRoot [32]byte) ([]*ethpb.SignedBeaconBlock, error) {
// Nothing to load for invalid range.
-	// TODO(#7620): Return error for invalid range.
	if endSlot < startSlot {
-		return nil, nil
+		return nil, fmt.Errorf("start slot %d >= end slot %d", startSlot, endSlot)
}
filter := filters.NewFilter().SetStartSlot(startSlot).SetEndSlot(endSlot)
blocks, blockRoots, err := s.beaconDB.Blocks(ctx, filter)
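A self-contained illustration of the new contract (loadRange is a stand-in for LoadBlocks): an inverted range now fails loudly instead of returning (nil, nil), which callers tended to misread as an empty-but-valid range:

```go
package main

import "fmt"

// loadRange mimics the guard added to LoadBlocks above: reject inverted
// slot ranges with an error instead of silently returning nothing.
func loadRange(startSlot, endSlot uint64) ([]uint64, error) {
	if endSlot < startSlot {
		return nil, fmt.Errorf("start slot %d >= end slot %d", startSlot, endSlot)
	}
	slots := make([]uint64, 0, endSlot-startSlot+1)
	for s := startSlot; s <= endSlot; s++ {
		slots = append(slots, s)
	}
	return slots, nil
}

func main() {
	if _, err := loadRange(10, 5); err != nil {
		fmt.Println(err) // start slot 10 >= end slot 5
	}
}
```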

View File

@@ -43,6 +43,17 @@ func (s *State) ForceCheckpoint(ctx context.Context, root []byte) error {
return s.beaconDB.SaveState(ctx, fs, root32)
}
// SaveStateSummariesToDB saves cached state summaries to DB.
func (s *State) SaveStateSummariesToDB(ctx context.Context) error {
ctx, span := trace.StartSpan(ctx, "stateGen.SaveStateSummariesToDB")
defer span.End()
if err := s.beaconDB.SaveStateSummaries(ctx, s.stateSummaryCache.GetAll()); err != nil {
return err
}
s.stateSummaryCache.Clear()
return nil
}
// SaveStateSummary saves the relevant state summary for a block and its corresponding state slot in the
// state summary cache.
func (s *State) SaveStateSummary(_ context.Context, blk *ethpb.SignedBeaconBlock, blockRoot [32]byte) {

View File

@@ -6,6 +6,7 @@ import (
"github.com/prysmaticlabs/prysm/beacon-chain/cache"
testDB "github.com/prysmaticlabs/prysm/beacon-chain/db/testing"
pb "github.com/prysmaticlabs/prysm/proto/beacon/p2p/v1"
"github.com/prysmaticlabs/prysm/shared/params"
"github.com/prysmaticlabs/prysm/shared/testutil"
"github.com/prysmaticlabs/prysm/shared/testutil/assert"
@@ -202,3 +203,19 @@ func TestEnableSaveHotStateToDB_AlreadyDisabled(t *testing.T) {
require.LogsDoNotContain(t, hook, "Exiting mode to save hot states in DB")
require.Equal(t, false, service.saveHotStateDB.enabled)
}
func TestState_SaveStateSummariesToDB(t *testing.T) {
ctx := context.Background()
db, _ := testDB.SetupDB(t)
service := New(db, cache.NewStateSummaryCache())
r := [32]byte{'a'}
s := &pb.StateSummary{Root: r[:], Slot: 1}
service.stateSummaryCache.Put(r, s)
require.Equal(t, false, service.beaconDB.HasStateSummary(ctx, r))
require.Equal(t, true, service.stateSummaryCache.Has(r))
require.NoError(t, service.SaveStateSummariesToDB(ctx))
require.Equal(t, true, service.beaconDB.HasStateSummary(ctx, r))
require.Equal(t, false, service.stateSummaryCache.Has(r))
}

View File

@@ -1,17 +1,13 @@
package stateutil_test
import (
"os"
"testing"
"github.com/prysmaticlabs/prysm/shared/featureconfig"
)
func TestMain(m *testing.M) {
-	run := func() int {
-		resetCfg := featureconfig.InitWithReset(&featureconfig.Flags{EnableSSZCache: true})
-		defer resetCfg()
-		return m.Run()
-	}
-	os.Exit(run())
+	resetCfg := featureconfig.InitWithReset(&featureconfig.Flags{EnableSSZCache: true})
+	defer resetCfg()
+	m.Run()
}

Some files were not shown because too many files have changed in this diff.