Compare commits

..

3 Commits

Author SHA1 Message Date
rkapka
6c64a1fbe0 Use errgroup in OptimizedValidatorRoots 2026-01-30 23:44:45 +09:00
Justin Traglia
55fe85c887 Add ability to download nightly tests from a specific night (#16298)
**What type of PR is this?**

Feature

**What does this PR do? Why is it needed?**

This PR allows devs to test against a specific run of the nightly
reference test generator.
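
For example, the run id of a specific nightly workflow run can be passed through the same environment variable; the command below is a sketch that reuses the illustrative run id from the updated README in this comparison:

```bash
bazel test //... --test_tag_filters=spectest --repo_env=CONSENSUS_SPEC_TESTS_VERSION=nightly-21422848633
```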

**Acknowledgements**

- [x] I have read
[CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md).
- [x] I have included a uniquely named [changelog fragment
file](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md#maintaining-changelogmd).
- [x] I have added a description with sufficient context for reviewers
to understand this PR.
- [x] I have tested that my changes work as expected and I added a
testing plan to the PR description (if applicable).
2026-01-29 21:38:13 +00:00
Justin Traglia
31f77567dd Add a README for specrefs (#16302)
**What type of PR is this?**

Documentation

**What does this PR do? Why is it needed?**

This PR adds a basic README for the specrefs.


**Acknowledgements**

- [x] I have read
[CONTRIBUTING.md](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md).
- [x] I have included a uniquely named [changelog fragment
file](https://github.com/prysmaticlabs/prysm/blob/develop/CONTRIBUTING.md#maintaining-changelogmd).
- [x] I have added a description with sufficient context for reviewers
to understand this PR.
- [x] I have tested that my changes work as expected and I added a
testing plan to the PR description (if applicable).
2026-01-29 20:36:29 +00:00
12 changed files with 98 additions and 536 deletions

View File

@@ -1,32 +0,0 @@
load("@prysm//tools/go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = ["pool.go"],
importpath = "github.com/OffchainLabs/prysm/v7/beacon-chain/operations/payloadattestation",
visibility = ["//visibility:public"],
deps = [
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//crypto/hash:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_prysmaticlabs_go_bitfield//:go_default_library",
"@org_golang_google_protobuf//proto:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["pool_test.go"],
embed = [":go_default_library"],
deps = [
"//consensus-types/primitives:go_default_library",
"//crypto/bls:go_default_library",
"//encoding/bytesutil:go_default_library",
"//proto/prysm/v1alpha1:go_default_library",
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
],
)

View File

@@ -1,179 +0,0 @@
package payloadattestation
import (
"sync"
"github.com/OffchainLabs/go-bitfield"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/crypto/bls"
"github.com/OffchainLabs/prysm/v7/crypto/hash"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/pkg/errors"
"google.golang.org/protobuf/proto"
)
var errNilPayloadAttestationMessage = errors.New("nil payload attestation message")
// PoolManager maintains pending payload attestations.
// This pool is used by proposers to insert payload attestations into new blocks.
type PoolManager interface {
// PendingPayloadAttestations returns all pending aggregated payload attestations.
// If a slot is provided, only attestations for that slot are returned.
PendingPayloadAttestations(slot ...primitives.Slot) []*ethpb.PayloadAttestation
// InsertPayloadAttestation inserts or aggregates a payload attestation
// message into the pool. The idx parameter is the PTC committee index
// of the validator (position in the bitvector).
InsertPayloadAttestation(msg *ethpb.PayloadAttestationMessage, idx uint64) error
// Seen returns true if the PTC committee index has already been seen
// for the given PayloadAttestationData.
Seen(data *ethpb.PayloadAttestationData, idx uint64) bool
// MarkIncluded removes the attestation matching the given data from the pool.
MarkIncluded(att *ethpb.PayloadAttestation)
}
// Pool is a concrete implementation of PoolManager.
// Keyed by hash of PayloadAttestationData; stores aggregated PayloadAttestation.
type Pool struct {
lock sync.RWMutex
pending map[[32]byte]*ethpb.PayloadAttestation
}
// NewPool returns an initialized pool.
func NewPool() *Pool {
return &Pool{
pending: make(map[[32]byte]*ethpb.PayloadAttestation),
}
}
// PendingPayloadAttestations returns all pending payload attestations.
// If a slot filter is provided, only attestations for that slot are returned.
func (p *Pool) PendingPayloadAttestations(slot ...primitives.Slot) []*ethpb.PayloadAttestation {
p.lock.RLock()
defer p.lock.RUnlock()
result := make([]*ethpb.PayloadAttestation, 0, len(p.pending))
for _, att := range p.pending {
if len(slot) > 0 && att.Data.Slot != slot[0] {
continue
}
result = append(result, att)
}
return result
}
// InsertPayloadAttestation inserts a payload attestation message into the pool,
// aggregating it with any existing attestation that shares the same PayloadAttestationData.
// The idx parameter is the PTC committee index used to set the aggregation bit.
func (p *Pool) InsertPayloadAttestation(msg *ethpb.PayloadAttestationMessage, idx uint64) error {
if msg == nil || msg.Data == nil {
return errNilPayloadAttestationMessage
}
key, err := dataKey(msg.Data)
if err != nil {
return errors.Wrap(err, "could not compute data key")
}
p.lock.Lock()
defer p.lock.Unlock()
existing, ok := p.pending[key]
if !ok {
p.pending[key] = messageToPayloadAttestation(msg, idx)
return nil
}
if existing.AggregationBits.BitAt(idx) {
return nil
}
sig, err := aggregateSigFromMessage(existing, msg)
if err != nil {
return errors.Wrap(err, "could not aggregate signatures")
}
existing.Signature = sig
existing.AggregationBits.SetBitAt(idx, true)
return nil
}
// Seen returns true if the PTC committee index has already been seen
// for the given PayloadAttestationData.
func (p *Pool) Seen(data *ethpb.PayloadAttestationData, idx uint64) bool {
if data == nil {
return false
}
key, err := dataKey(data)
if err != nil {
return false
}
p.lock.RLock()
defer p.lock.RUnlock()
existing, ok := p.pending[key]
if !ok {
return false
}
return existing.AggregationBits.BitAt(idx)
}
// MarkIncluded removes the attestation with matching data from the pool.
func (p *Pool) MarkIncluded(att *ethpb.PayloadAttestation) {
if att == nil || att.Data == nil {
return
}
key, err := dataKey(att.Data)
if err != nil {
return
}
p.lock.Lock()
defer p.lock.Unlock()
delete(p.pending, key)
}
// messageToPayloadAttestation creates a PayloadAttestation with a single
// aggregated bit from the passed PayloadAttestationMessage.
func messageToPayloadAttestation(msg *ethpb.PayloadAttestationMessage, idx uint64) *ethpb.PayloadAttestation {
bits := bitfield.NewBitvector512()
bits.SetBitAt(idx, true)
data := &ethpb.PayloadAttestationData{
BeaconBlockRoot: bytesutil.SafeCopyBytes(msg.Data.BeaconBlockRoot),
Slot: msg.Data.Slot,
PayloadPresent: msg.Data.PayloadPresent,
BlobDataAvailable: msg.Data.BlobDataAvailable,
}
return &ethpb.PayloadAttestation{
AggregationBits: bits,
Data: data,
Signature: bytesutil.SafeCopyBytes(msg.Signature),
}
}
// aggregateSigFromMessage returns the aggregated signature by combining the
// existing aggregated signature with the message's signature.
func aggregateSigFromMessage(aggregated *ethpb.PayloadAttestation, message *ethpb.PayloadAttestationMessage) ([]byte, error) {
aggSig, err := bls.SignatureFromBytesNoValidation(aggregated.Signature)
if err != nil {
return nil, err
}
sig, err := bls.SignatureFromBytesNoValidation(message.Signature)
if err != nil {
return nil, err
}
return bls.AggregateSignatures([]bls.Signature{aggSig, sig}).Marshal(), nil
}
// dataKey computes a deterministic key for PayloadAttestationData
// by hashing its serialized form.
func dataKey(data *ethpb.PayloadAttestationData) ([32]byte, error) {
enc, err := proto.Marshal(data)
if err != nil {
return [32]byte{}, err
}
return hash.Hash(enc), nil
}

View File

@@ -1,291 +0,0 @@
package payloadattestation
import (
"testing"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/crypto/bls"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/assert"
"github.com/OffchainLabs/prysm/v7/testing/require"
)
func TestPool_PendingPayloadAttestations(t *testing.T) {
t.Run("empty pool", func(t *testing.T) {
pool := NewPool()
atts := pool.PendingPayloadAttestations()
assert.Equal(t, 0, len(atts))
})
t.Run("non-empty pool", func(t *testing.T) {
pool := NewPool()
sig := bls.NewAggregateSignature().Marshal()
msg1 := &ethpb.PayloadAttestationMessage{
ValidatorIndex: 0,
Data: &ethpb.PayloadAttestationData{
BeaconBlockRoot: make([]byte, 32),
Slot: 1,
PayloadPresent: true,
BlobDataAvailable: false,
},
Signature: sig,
}
msg2 := &ethpb.PayloadAttestationMessage{
ValidatorIndex: 1,
Data: &ethpb.PayloadAttestationData{
BeaconBlockRoot: make([]byte, 32),
Slot: 2,
PayloadPresent: false,
BlobDataAvailable: true,
},
Signature: sig,
}
require.NoError(t, pool.InsertPayloadAttestation(msg1, 0))
require.NoError(t, pool.InsertPayloadAttestation(msg2, 1))
atts := pool.PendingPayloadAttestations()
assert.Equal(t, 2, len(atts))
})
t.Run("filter by slot", func(t *testing.T) {
pool := NewPool()
sig := bls.NewAggregateSignature().Marshal()
msg1 := &ethpb.PayloadAttestationMessage{
ValidatorIndex: 0,
Data: &ethpb.PayloadAttestationData{
BeaconBlockRoot: make([]byte, 32),
Slot: 1,
PayloadPresent: true,
BlobDataAvailable: false,
},
Signature: sig,
}
msg2 := &ethpb.PayloadAttestationMessage{
ValidatorIndex: 1,
Data: &ethpb.PayloadAttestationData{
BeaconBlockRoot: make([]byte, 32),
Slot: 2,
PayloadPresent: false,
BlobDataAvailable: true,
},
Signature: sig,
}
require.NoError(t, pool.InsertPayloadAttestation(msg1, 0))
require.NoError(t, pool.InsertPayloadAttestation(msg2, 1))
atts := pool.PendingPayloadAttestations(primitives.Slot(1))
assert.Equal(t, 1, len(atts))
assert.Equal(t, primitives.Slot(1), atts[0].Data.Slot)
atts = pool.PendingPayloadAttestations(primitives.Slot(2))
assert.Equal(t, 1, len(atts))
assert.Equal(t, primitives.Slot(2), atts[0].Data.Slot)
atts = pool.PendingPayloadAttestations(primitives.Slot(99))
assert.Equal(t, 0, len(atts))
})
}
func TestPool_InsertPayloadAttestation(t *testing.T) {
t.Run("nil message", func(t *testing.T) {
pool := NewPool()
err := pool.InsertPayloadAttestation(nil, 0)
require.ErrorContains(t, "nil payload attestation message", err)
})
t.Run("nil data", func(t *testing.T) {
pool := NewPool()
err := pool.InsertPayloadAttestation(&ethpb.PayloadAttestationMessage{}, 0)
require.ErrorContains(t, "nil payload attestation message", err)
})
t.Run("insert creates new entry with correct aggregation bit", func(t *testing.T) {
pool := NewPool()
sig := bls.NewAggregateSignature().Marshal()
idx := uint64(5)
msg := &ethpb.PayloadAttestationMessage{
ValidatorIndex: 0,
Data: &ethpb.PayloadAttestationData{
BeaconBlockRoot: make([]byte, 32),
Slot: 1,
PayloadPresent: true,
BlobDataAvailable: false,
},
Signature: sig,
}
require.NoError(t, pool.InsertPayloadAttestation(msg, idx))
atts := pool.PendingPayloadAttestations()
require.Equal(t, 1, len(atts))
assert.Equal(t, true, atts[0].AggregationBits.BitAt(idx))
assert.Equal(t, false, atts[0].AggregationBits.BitAt(idx+1))
})
t.Run("duplicate index is no-op", func(t *testing.T) {
pool := NewPool()
sig := bls.NewAggregateSignature().Marshal()
idx := uint64(3)
msg := &ethpb.PayloadAttestationMessage{
ValidatorIndex: 0,
Data: &ethpb.PayloadAttestationData{
BeaconBlockRoot: make([]byte, 32),
Slot: 1,
PayloadPresent: true,
BlobDataAvailable: false,
},
Signature: sig,
}
require.NoError(t, pool.InsertPayloadAttestation(msg, idx))
firstSig := bytesutil.SafeCopyBytes(pool.PendingPayloadAttestations()[0].Signature)
require.NoError(t, pool.InsertPayloadAttestation(msg, idx))
atts := pool.PendingPayloadAttestations()
require.Equal(t, 1, len(atts))
assert.DeepEqual(t, firstSig, atts[0].Signature)
})
t.Run("aggregates different indices", func(t *testing.T) {
pool := NewPool()
sig := bls.NewAggregateSignature().Marshal()
root := make([]byte, 32)
root[0] = 'r'
data := &ethpb.PayloadAttestationData{
BeaconBlockRoot: root,
Slot: 1,
PayloadPresent: true,
BlobDataAvailable: false,
}
msg1 := &ethpb.PayloadAttestationMessage{
ValidatorIndex: 0,
Data: data,
Signature: sig,
}
msg2 := &ethpb.PayloadAttestationMessage{
ValidatorIndex: 1,
Data: data,
Signature: sig,
}
require.NoError(t, pool.InsertPayloadAttestation(msg1, 5))
require.NoError(t, pool.InsertPayloadAttestation(msg2, 7))
atts := pool.PendingPayloadAttestations()
require.Equal(t, 1, len(atts))
assert.Equal(t, true, atts[0].AggregationBits.BitAt(5))
assert.Equal(t, true, atts[0].AggregationBits.BitAt(7))
assert.Equal(t, false, atts[0].AggregationBits.BitAt(6))
})
t.Run("different data creates separate entries", func(t *testing.T) {
pool := NewPool()
sig := bls.NewAggregateSignature().Marshal()
msg1 := &ethpb.PayloadAttestationMessage{
ValidatorIndex: 0,
Data: &ethpb.PayloadAttestationData{
BeaconBlockRoot: make([]byte, 32),
Slot: 1,
PayloadPresent: true,
BlobDataAvailable: false,
},
Signature: sig,
}
msg2 := &ethpb.PayloadAttestationMessage{
ValidatorIndex: 1,
Data: &ethpb.PayloadAttestationData{
BeaconBlockRoot: make([]byte, 32),
Slot: 1,
PayloadPresent: false, // different
BlobDataAvailable: false,
},
Signature: sig,
}
require.NoError(t, pool.InsertPayloadAttestation(msg1, 0))
require.NoError(t, pool.InsertPayloadAttestation(msg2, 1))
atts := pool.PendingPayloadAttestations()
assert.Equal(t, 2, len(atts))
})
}
func TestPool_Seen(t *testing.T) {
pool := NewPool()
sig := bls.NewAggregateSignature().Marshal()
data := &ethpb.PayloadAttestationData{
BeaconBlockRoot: make([]byte, 32),
Slot: 1,
PayloadPresent: true,
BlobDataAvailable: false,
}
assert.Equal(t, false, pool.Seen(data, 5))
msg := &ethpb.PayloadAttestationMessage{
ValidatorIndex: 0,
Data: data,
Signature: sig,
}
require.NoError(t, pool.InsertPayloadAttestation(msg, 5))
assert.Equal(t, true, pool.Seen(data, 5))
assert.Equal(t, false, pool.Seen(data, 6))
assert.Equal(t, false, pool.Seen(nil, 5))
}
func TestPool_MarkIncluded(t *testing.T) {
t.Run("mark included removes from pool", func(t *testing.T) {
pool := NewPool()
sig := bls.NewAggregateSignature().Marshal()
msg := &ethpb.PayloadAttestationMessage{
ValidatorIndex: 0,
Data: &ethpb.PayloadAttestationData{
BeaconBlockRoot: make([]byte, 32),
Slot: 1,
PayloadPresent: true,
BlobDataAvailable: false,
},
Signature: sig,
}
require.NoError(t, pool.InsertPayloadAttestation(msg, 0))
assert.Equal(t, 1, len(pool.PendingPayloadAttestations()))
pool.MarkIncluded(&ethpb.PayloadAttestation{
Data: &ethpb.PayloadAttestationData{
BeaconBlockRoot: make([]byte, 32),
Slot: 1,
PayloadPresent: true,
BlobDataAvailable: false,
},
})
assert.Equal(t, 0, len(pool.PendingPayloadAttestations()))
})
t.Run("mark included with non-matching data does nothing", func(t *testing.T) {
pool := NewPool()
sig := bls.NewAggregateSignature().Marshal()
msg := &ethpb.PayloadAttestationMessage{
ValidatorIndex: 0,
Data: &ethpb.PayloadAttestationData{
BeaconBlockRoot: make([]byte, 32),
Slot: 1,
PayloadPresent: true,
BlobDataAvailable: false,
},
Signature: sig,
}
require.NoError(t, pool.InsertPayloadAttestation(msg, 0))
pool.MarkIncluded(&ethpb.PayloadAttestation{
Data: &ethpb.PayloadAttestationData{
BeaconBlockRoot: make([]byte, 32),
Slot: 999,
PayloadPresent: true,
BlobDataAvailable: false,
},
})
assert.Equal(t, 1, len(pool.PendingPayloadAttestations()))
})
t.Run("mark included with nil is safe", func(t *testing.T) {
pool := NewPool()
pool.MarkIncluded(nil)
pool.MarkIncluded(&ethpb.PayloadAttestation{})
})
}

View File

@@ -46,6 +46,7 @@ go_library(
"//proto/prysm/v1alpha1:go_default_library",
"@com_github_pkg_errors//:go_default_library",
"@com_github_sirupsen_logrus//:go_default_library",
"@org_golang_x_sync//errgroup:go_default_library",
],
)
@@ -78,6 +79,7 @@ go_test(
"//testing/assert:go_default_library",
"//testing/require:go_default_library",
"//testing/util:go_default_library",
"@org_golang_x_sync//errgroup:go_default_library",
],
)

View File

@@ -2,16 +2,16 @@ package stateutil
import (
"bytes"
"context"
"encoding/binary"
"runtime"
"sync"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/crypto/hash/htr"
"github.com/OffchainLabs/prysm/v7/encoding/ssz"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/sync/errgroup"
)
const (
@@ -54,17 +54,23 @@ func validatorRegistryRoot(validators []*ethpb.Validator) ([32]byte, error) {
return res, nil
}
func hashValidatorHelper(validators []*ethpb.Validator, roots [][32]byte, j int, groupSize int, wg *sync.WaitGroup) {
defer wg.Done()
for i := range groupSize {
fRoots, err := ValidatorFieldRoots(validators[j*groupSize+i])
if err != nil {
logrus.WithError(err).Error("Could not get validator field roots")
return
}
for k, root := range fRoots {
roots[(j*groupSize+i)*validatorFieldRoots+k] = root
func hashValidatorHelper(ctx context.Context, validators []*ethpb.Validator, roots [][32]byte, j int, groupSize int) func() error {
return func() error {
for i := range groupSize {
select {
case <-ctx.Done():
return ctx.Err()
default:
fRoots, err := ValidatorFieldRoots(validators[j*groupSize+i])
if err != nil {
return errors.Wrap(err, "could not get validator field roots")
}
for k, root := range fRoots {
roots[(j*groupSize+i)*validatorFieldRoots+k] = root
}
}
}
return nil
}
}
@@ -75,14 +81,13 @@ func OptimizedValidatorRoots(validators []*ethpb.Validator) ([][32]byte, error)
if len(validators) == 0 {
return [][32]byte{}, nil
}
wg := sync.WaitGroup{}
g, ctx := errgroup.WithContext(context.Background())
n := runtime.GOMAXPROCS(0)
rootsSize := len(validators) * validatorFieldRoots
groupSize := len(validators) / n
roots := make([][32]byte, rootsSize)
wg.Add(n - 1)
for j := 0; j < n-1; j++ {
go hashValidatorHelper(validators, roots, j, groupSize, &wg)
g.Go(hashValidatorHelper(ctx, validators, roots, j, groupSize))
}
for i := (n - 1) * groupSize; i < len(validators); i++ {
fRoots, err := ValidatorFieldRoots(validators[i])
@@ -93,7 +98,9 @@ func OptimizedValidatorRoots(validators []*ethpb.Validator) ([][32]byte, error)
roots[i*validatorFieldRoots+k] = root
}
}
wg.Wait()
if err := g.Wait(); err != nil {
return [][32]byte{}, err
}
// A validator's tree can be represented with a depth of 3, since log2(8) = 3.
// Using this property we can lay out all the individual fields of a

View File

@@ -3,13 +3,13 @@ package stateutil
import (
"reflect"
"strings"
"sync"
"testing"
mathutil "github.com/OffchainLabs/prysm/v7/math"
ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
"github.com/OffchainLabs/prysm/v7/testing/assert"
"github.com/OffchainLabs/prysm/v7/testing/require"
"golang.org/x/sync/errgroup"
)
func TestValidatorConstants(t *testing.T) {
@@ -34,15 +34,15 @@ func TestValidatorConstants(t *testing.T) {
}
func TestHashValidatorHelper(t *testing.T) {
wg := sync.WaitGroup{}
wg.Add(1)
g, ctx := errgroup.WithContext(t.Context())
v := &ethpb.Validator{}
valList := make([]*ethpb.Validator, 10*validatorFieldRoots)
for i := range valList {
valList[i] = v
}
roots := make([][32]byte, len(valList))
hashValidatorHelper(valList, roots, 2, 2, &wg)
g.Go(hashValidatorHelper(ctx, valList, roots, 2, 2))
require.NoError(t, g.Wait())
for i := range 4 * validatorFieldRoots {
require.Equal(t, [32]byte{}, roots[i])
}

View File

@@ -0,0 +1,3 @@
### Added
- Added README for maintaining specrefs.

View File

@@ -0,0 +1,3 @@
### Added
- The ability to download the nightly reference tests from a specific day.

View File

@@ -0,0 +1,3 @@
### Changed
- Use `errgroup` in `OptimizedValidatorRoots`.

specrefs/README.md Normal file
View File

@@ -0,0 +1,35 @@
# Specification References
This directory contains specification reference tracking files managed by
[ethspecify](https://github.com/jtraglia/ethspecify).
## Installation
Install `ethspecify` with the following command:
```bash
pipx install ethspecify
```
> [!NOTE]
> You can run `ethspecify <cmd>` in the `specrefs` directory or
> `ethspecify <cmd> --path=specrefs` from the project's root directory.
## Maintenance
When adding support for a new specification version, follow these steps (a condensed command sequence is sketched after the list):
0. Change directory into the `specrefs` directory.
1. Update the version in the `.ethspecify.yml` configuration.
2. Run `ethspecify process` to update/populate specrefs.
3. Run `ethspecify check` to check specrefs.
4. If there are errors, use the error message as a guide to fix the issue. If
there are new specrefs with empty sources, implement/locate each item and
update each specref source list. If you choose not to implement an item,
add an exception to the appropriate section of the `.ethspecify.yml`
configuration.
5. Repeat steps 3 and 4 until `ethspecify check` passes.
6. Run `git diff` to view updated specrefs. If an object/function/etc has
changed, make the necessary updates to the implementation.
7. Lastly, in the project's root directory, run `act -j check-specrefs` to
ensure everything is correct.
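
Condensed into a single shell session, the steps above look roughly like this (a sketch; the edit in step 1 and any exception handling depend on the version being added):

```bash
cd specrefs
# Step 1: bump the version in the configuration.
$EDITOR .ethspecify.yml
# Steps 2-5: regenerate specrefs and re-run the check until it passes.
ethspecify process
ethspecify check
# Step 6: review the updated specrefs and adjust the implementation where needed.
git diff
# Step 7: from the project's root directory, run the CI job locally.
cd .. && act -j check-specrefs
```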

View File

@@ -21,10 +21,14 @@ There are tests for mainnet and minimal config, so for each config we will add a
## Running nightly spectests
Since [PR 15312](https://github.com/OffchainLabs/prysm/pull/15312), Prysm has support to download "nightly" spectests from github via a starlark rule configuration by environment variable.
Set `--repo_env=CONSENSUS_SPEC_TESTS_VERSION=nightly` when running spectest to download the "nightly" spectests.
Note: A GITHUB_TOKEN environment variable is required to be set. The github token must be a [fine grained token](https://docs.github.com/en/authentication/keeping-your-account-and-data-secure/managing-your-personal-access-tokens#creating-a-fine-grained-personal-access-token).
Since [PR 15312](https://github.com/OffchainLabs/prysm/pull/15312), Prysm supports downloading "nightly" spectests from GitHub via a Starlark rule configured by an environment variable.
Set `--repo_env=CONSENSUS_SPEC_TESTS_VERSION=nightly` or `--repo_env=CONSENSUS_SPEC_TESTS_VERSION=nightly-<run_id>` when running spectest to download the "nightly" spectests.
Note: A GITHUB_TOKEN environment variable must be set. The GitHub token does not need to be associated with your main account; it can be from a "burner account". It also does not need to be a fine-grained token; a classic token works.
```
bazel test //... --test_tag_filters=spectest --repo_env=CONSENSUS_SPEC_TESTS_VERSION=nightly
```
```
bazel test //... --test_tag_filters=spectest --repo_env=CONSENSUS_SPEC_TESTS_VERSION=nightly-21422848633
```

View File

@@ -1,5 +1,6 @@
# bazel build @consensus_spec_tests//:test_data
# bazel build @consensus_spec_tests//:test_data --repo_env=CONSENSUS_SPEC_TESTS_VERSION=nightly
# bazel build @consensus_spec_tests//:test_data --repo_env=CONSENSUS_SPEC_TESTS_VERSION=nightly-<run_id>
def _get_redirected_url(repository_ctx, url, headers):
if not repository_ctx.which("curl"):
@@ -24,7 +25,7 @@ def _impl(repository_ctx):
version = repository_ctx.getenv("CONSENSUS_SPEC_TESTS_VERSION") or repository_ctx.attr.version
token = repository_ctx.getenv("GITHUB_TOKEN") or ""
if version == "nightly":
if version == "nightly" or version.startswith("nightly-"):
print("Downloading nightly tests")
if not token:
fail("Error GITHUB_TOKEN is not set")
@@ -34,16 +35,22 @@ def _impl(repository_ctx):
"Accept": "application/vnd.github+json",
}
repository_ctx.download(
"https://api.github.com/repos/%s/actions/workflows/%s/runs?branch=%s&status=success&per_page=1"
% (repository_ctx.attr.repo, repository_ctx.attr.workflow, repository_ctx.attr.branch),
headers = headers,
output = "runs.json"
)
if version.startswith("nightly-"):
run_id = version.split("nightly-", 1)[1]
if not run_id:
fail("Error invalid run id")
else:
repository_ctx.download(
"https://api.github.com/repos/%s/actions/workflows/%s/runs?branch=%s&status=success&per_page=1"
% (repository_ctx.attr.repo, repository_ctx.attr.workflow, repository_ctx.attr.branch),
headers = headers,
output = "runs.json"
)
run_id = json.decode(repository_ctx.read("runs.json"))["workflow_runs"][0]["id"]
repository_ctx.delete("runs.json")
run_id = json.decode(repository_ctx.read("runs.json"))["workflow_runs"][0]["id"]
repository_ctx.delete("runs.json")
print("Run id:", run_id)
repository_ctx.download(
"https://api.github.com/repos/%s/actions/runs/%s/artifacts"
% (repository_ctx.attr.repo, run_id),
@@ -108,8 +115,8 @@ consensus_spec_tests = repository_rule(
"version": attr.string(mandatory = True),
"flavors": attr.string_dict(mandatory = True),
"repo": attr.string(default = "ethereum/consensus-specs"),
"workflow": attr.string(default = "generate_vectors.yml"),
"branch": attr.string(default = "dev"),
"workflow": attr.string(default = "nightly-reftests.yml"),
"branch": attr.string(default = "master"),
"release_url_template": attr.string(default = "https://github.com/ethereum/consensus-specs/releases/download/%s"),
},
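
As a companion to the rule change above, one way to look up a recent run id for the `nightly-<run_id>` flavor is to query the same GitHub API endpoint the rule uses. This is a sketch assuming the defaults shown in the rule (the `ethereum/consensus-specs` repo, the `nightly-reftests.yml` workflow, and the `master` branch), a `GITHUB_TOKEN` in the environment, and `jq` for extracting the id:

```bash
curl -s \
  -H "Authorization: Bearer $GITHUB_TOKEN" \
  -H "Accept: application/vnd.github+json" \
  "https://api.github.com/repos/ethereum/consensus-specs/actions/workflows/nightly-reftests.yml/runs?branch=master&status=success&per_page=1" \
  | jq '.workflow_runs[0].id'
```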
)