prysm/validator/db/kv/migration_optimal_attester_protection_test.go
Preston Van Loon 2fd6bd8150 Add golang.org/x/tools modernize static analyzer and fix violations (#15946)
* Ran gopls modernize to fix everything

go run golang.org/x/tools/gopls/internal/analysis/modernize/cmd/modernize@latest -fix -test ./...

* Override the rules_go-provided dependency on golang.org/x/tools to v0.38.0.

To update this, check out rules_go, run `bazel run //go/tools/releaser -- upgrade-dep -mirror=false org_golang_x_tools`, and copy the resulting patches.

* Fix buildtag violations and ignore buildtag violations in external dependencies

* Introduce modernize analyzer package.

* Add modernize "any" analyzer.

* Fix violations of any analyzer
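
For illustration, the shape of the rewrite the "any" analyzer performs (a sketch, not code from this commit; decode is a made-up name):

// Before:
func decode(fields map[string]interface{}) interface{} { return fields["root"] }
// After: any has been an alias for interface{} since Go 1.18.
func decode(fields map[string]any) any { return fields["root"] }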

* Add modernize "appendclipped" analyzer.

* Fix violations of appendclipped
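
Roughly the transformation appendclipped suggests (sketch; epochs is an illustrative []uint64):

// Before: cloning by appending to a nil slice.
clone := append([]uint64(nil), epochs...)
// After (slices.Clone, Go 1.21):
clone := slices.Clone(epochs)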

* Add modernize "bloop" analyzer.

* Add modernize "fmtappendf" analyzer.

* Add modernize "forvar" analyzer.

* Add modernize "mapsloop" analyzer.

* Add modernize "minmax" analyzer.

* Fix violations of minmax analyzer
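
The minmax rewrite, sketched:

// Before:
if a < b {
	a = b
}
// After (built-in max, Go 1.21):
a = max(a, b)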

* Add modernize "omitzero" analyzer.

* Add modernize "rangeint" analyzer.

* Fix violations of rangeint.
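
The rangeint rewrite, sketched (process is a placeholder); this is the change visible below in `for sourceEpoch := range numEpochs`:

// Before:
for i := uint64(0); i < numEpochs; i++ {
	process(i)
}
// After (range over an integer, Go 1.22):
for i := range numEpochs {
	process(i)
}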

* Add modernize "reflecttypefor" analyzer.

* Fix violations of reflecttypefor analyzer.
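
The reflecttypefor rewrite, sketched (MyType is a placeholder):

// Before: the classic nil-pointer trick to name a type.
typ := reflect.TypeOf((*MyType)(nil)).Elem()
// After (reflect.TypeFor, Go 1.22):
typ := reflect.TypeFor[MyType]()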

* Add modernize "slicescontains" analyzer.

* Add modernize "slicessort" analyzer.

* Add modernize "slicesdelete" analyzer. This is disabled by default for now. See https://go.dev/issue/73686.

* Add modernize "stringscutprefix" analyzer.

* Add modernize "stringsbuilder" analyzer.

* Fix violations of stringsbuilder analyzer.
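
The stringsbuilder rewrite, sketched:

// Before: quadratic string concatenation in a loop.
var out string
for _, part := range parts {
	out += part
}
// After:
var sb strings.Builder
for _, part := range parts {
	sb.WriteString(part)
}
out := sb.String()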

* Add modernize "stringsseq" analyzer.

* Add modernize "testingcontext" analyzer.

* Add modernize "waitgroup" analyzer.

* Changelog fragment

* gofmt

* gazelle

* Add modernize "newexpr" analyzer.

* Disable newexpr until go1.26
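
If I read the upstream analyzer right, newexpr targets pointer-to-value helpers that Go 1.26's generalized new(expr) is expected to subsume (sketch; ptr is a common hand-rolled helper):

// Before:
func ptr[T any](v T) *T { return &v }
p := ptr(uint64(50))
// After (new with an arbitrary expression, expected in Go 1.26):
p := new(uint64(50))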

* Add more details in WORKSPACE on how to update the override

* Address @nalepae's feedback on min()

* gofmt

* Fix violations of forvar

package kv

import (
"fmt"
"testing"
fieldparams "github.com/OffchainLabs/prysm/v7/config/fieldparams"
"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
"github.com/OffchainLabs/prysm/v7/testing/require"
bolt "go.etcd.io/bbolt"
)

func Test_migrateOptimalAttesterProtectionUp(t *testing.T) {
tests := []struct {
name string
setup func(t *testing.T, validatorDB *Store)
eval func(t *testing.T, validatorDB *Store)
}{
{
name: "only runs once",
setup: func(t *testing.T, validatorDB *Store) {
err := validatorDB.update(func(tx *bolt.Tx) error {
return tx.Bucket(migrationsBucket).Put(migrationOptimalAttesterProtectionKey, migrationCompleted)
})
require.NoError(t, err)
},
eval: func(t *testing.T, validatorDB *Store) {
err := validatorDB.view(func(tx *bolt.Tx) error {
data := tx.Bucket(migrationsBucket).Get(migrationOptimalAttesterProtectionKey)
require.DeepEqual(t, data, migrationCompleted)
return nil
})
require.NoError(t, err)
},
},
{
name: "populates optimized schema buckets",
setup: func(t *testing.T, validatorDB *Store) {
pubKey := [fieldparams.BLSPubkeyLength]byte{1}
history := newDeprecatedAttestingHistory(0)
				// Attest epochs 1 through 50, with source epochs 0 through 49.
numEpochs := primitives.Epoch(50)
for i := primitives.Epoch(1); i <= numEpochs; i++ {
var sr [32]byte
copy(sr[:], fmt.Sprintf("%d", i))
newHist, err := history.setTargetData(i, &deprecatedHistoryData{
Source: i - 1,
SigningRoot: sr[:],
})
require.NoError(t, err)
history = newHist
}
newHist, err := history.setLatestEpochWritten(numEpochs)
require.NoError(t, err)
err = validatorDB.update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(deprecatedAttestationHistoryBucket)
return bucket.Put(pubKey[:], newHist)
})
require.NoError(t, err)
},
eval: func(t *testing.T, validatorDB *Store) {
				// Verify we indeed have the data for all epochs
				// from genesis through epoch 50 under the new schema.
err := validatorDB.view(func(tx *bolt.Tx) error {
pubKey := [fieldparams.BLSPubkeyLength]byte{1}
bucket := tx.Bucket(pubKeysBucket)
pkBucket := bucket.Bucket(pubKey[:])
signingRootsBucket := pkBucket.Bucket(attestationSigningRootsBucket)
sourceEpochsBucket := pkBucket.Bucket(attestationSourceEpochsBucket)
numEpochs := uint64(50)
					// Verify we have the correct signing roots for target epochs 1 through 50.
for targetEpoch := uint64(1); targetEpoch <= numEpochs; targetEpoch++ {
var sr [32]byte
copy(sr[:], fmt.Sprintf("%d", targetEpoch))
targetEpochBytes := bytesutil.Uint64ToBytesBigEndian(targetEpoch)
migratedSigningRoot := signingRootsBucket.Get(targetEpochBytes)
require.DeepEqual(t, sr[:], migratedSigningRoot)
}
					// Verify we have the correct (source epoch, target epoch) pairs for source epochs 0 through 49.
for sourceEpoch := range numEpochs {
sourceEpochBytes := bytesutil.Uint64ToBytesBigEndian(sourceEpoch)
targetEpochBytes := sourceEpochsBucket.Get(sourceEpochBytes)
targetEpoch := bytesutil.BytesToUint64BigEndian(targetEpochBytes)
require.Equal(t, sourceEpoch+1, targetEpoch)
}
return nil
})
require.NoError(t, err)
},
},
{
name: "partial data saved for both types still completes the migration successfully",
setup: func(t *testing.T, validatorDB *Store) {
ctx := t.Context()
pubKey := [fieldparams.BLSPubkeyLength]byte{1}
history := newDeprecatedAttestingHistory(0)
				// Attest epochs 1 through 50, with source epochs 0 through 49.
numEpochs := primitives.Epoch(50)
for i := primitives.Epoch(1); i <= numEpochs; i++ {
var sr [32]byte
copy(sr[:], fmt.Sprintf("%d", i))
newHist, err := history.setTargetData(i, &deprecatedHistoryData{
Source: i - 1,
SigningRoot: sr[:],
})
require.NoError(t, err)
history = newHist
}
newHist, err := history.setLatestEpochWritten(numEpochs)
require.NoError(t, err)
err = validatorDB.update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(deprecatedAttestationHistoryBucket)
return bucket.Put(pubKey[:], newHist)
})
require.NoError(t, err)
// Run the migration.
require.NoError(t, validatorDB.migrateOptimalAttesterProtectionUp(ctx))
// Then delete the migration completed key.
err = validatorDB.update(func(tx *bolt.Tx) error {
mb := tx.Bucket(migrationsBucket)
return mb.Delete(migrationOptimalAttesterProtectionKey)
})
require.NoError(t, err)
// Write one more entry to the DB with the old format.
var sr [32]byte
copy(sr[:], fmt.Sprintf("%d", numEpochs+1))
newHist, err = newHist.setTargetData(numEpochs+1, &deprecatedHistoryData{
Source: numEpochs,
SigningRoot: sr[:],
})
require.NoError(t, err)
newHist, err = newHist.setLatestEpochWritten(numEpochs + 1)
require.NoError(t, err)
err = validatorDB.update(func(tx *bolt.Tx) error {
bucket := tx.Bucket(deprecatedAttestationHistoryBucket)
return bucket.Put(pubKey[:], newHist)
})
require.NoError(t, err)
},
eval: func(t *testing.T, validatorDB *Store) {
				// Verify we indeed have the data for all epochs
				// from genesis through epoch 51 under the new schema.
err := validatorDB.view(func(tx *bolt.Tx) error {
pubKey := [fieldparams.BLSPubkeyLength]byte{1}
bucket := tx.Bucket(pubKeysBucket)
pkBucket := bucket.Bucket(pubKey[:])
signingRootsBucket := pkBucket.Bucket(attestationSigningRootsBucket)
sourceEpochsBucket := pkBucket.Bucket(attestationSourceEpochsBucket)
numEpochs := uint64(50)
					// Verify we have the correct signing roots for target epochs 1 through 51.
for targetEpoch := uint64(1); targetEpoch <= numEpochs+1; targetEpoch++ {
var sr [32]byte
copy(sr[:], fmt.Sprintf("%d", targetEpoch))
targetEpochBytes := bytesutil.Uint64ToBytesBigEndian(targetEpoch)
migratedSigningRoot := signingRootsBucket.Get(targetEpochBytes)
require.DeepEqual(t, sr[:], migratedSigningRoot)
}
					// Verify we have the correct (source epoch, target epoch) pairs for source epochs 0 through 50.
for sourceEpoch := uint64(0); sourceEpoch < numEpochs+1; sourceEpoch++ {
sourceEpochBytes := bytesutil.Uint64ToBytesBigEndian(sourceEpoch)
targetEpochBytes := sourceEpochsBucket.Get(sourceEpochBytes)
targetEpoch := bytesutil.BytesToUint64BigEndian(targetEpochBytes)
require.Equal(t, sourceEpoch+1, targetEpoch)
}
return nil
})
require.NoError(t, err)
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
validatorDB := setupDB(t, nil)
tt.setup(t, validatorDB)
require.NoError(t, validatorDB.migrateOptimalAttesterProtectionUp(t.Context()))
tt.eval(t, validatorDB)
})
}
}

func Test_migrateOptimalAttesterProtectionDown(t *testing.T) {
tests := []struct {
name string
setup func(t *testing.T, validatorDB *Store)
eval func(t *testing.T, validatorDB *Store)
}{
{
name: "unsets the migration completed key upon completion",
setup: func(t *testing.T, validatorDB *Store) {
err := validatorDB.update(func(tx *bolt.Tx) error {
return tx.Bucket(migrationsBucket).Put(migrationOptimalAttesterProtectionKey, migrationCompleted)
})
require.NoError(t, err)
},
eval: func(t *testing.T, validatorDB *Store) {
err := validatorDB.view(func(tx *bolt.Tx) error {
data := tx.Bucket(migrationsBucket).Get(migrationOptimalAttesterProtectionKey)
require.DeepEqual(t, true, data == nil)
return nil
})
require.NoError(t, err)
},
},
{
name: "unsets the migration, even if unset already (no panic)",
setup: func(t *testing.T, validatorDB *Store) {},
eval: func(t *testing.T, validatorDB *Store) {
// Ensure the migration is not marked as complete.
err := validatorDB.view(func(tx *bolt.Tx) error {
data := tx.Bucket(migrationsBucket).Get(migrationOptimalAttesterProtectionKey)
require.DeepNotEqual(t, data, migrationCompleted)
return nil
})
require.NoError(t, err)
},
},
{
name: "populates old format from data using the new schema",
setup: func(t *testing.T, validatorDB *Store) {
pubKeys := [][fieldparams.BLSPubkeyLength]byte{{1}, {2}}
// Create attesting history for two public keys
err := validatorDB.update(func(tx *bolt.Tx) error {
bkt := tx.Bucket(pubKeysBucket)
for _, pubKey := range pubKeys {
pkBucket, err := bkt.CreateBucketIfNotExists(pubKey[:])
if err != nil {
return err
}
sourceEpochsBucket, err := pkBucket.CreateBucketIfNotExists(attestationSourceEpochsBucket)
if err != nil {
return err
}
signingRootsBucket, err := pkBucket.CreateBucketIfNotExists(attestationSigningRootsBucket)
if err != nil {
return err
}
// The highest epoch we write is 50.
highestEpoch := uint64(50)
for i := uint64(1); i <= highestEpoch; i++ {
source := bytesutil.Uint64ToBytesBigEndian(i - 1)
target := bytesutil.Uint64ToBytesBigEndian(i)
if err := sourceEpochsBucket.Put(source, target); err != nil {
return err
}
							var signingRoot [32]byte
							// Derive the dummy signing root from the epoch number itself,
							// not the encoded target bytes (%d on a []byte prints a slice of integers).
							copy(signingRoot[:], fmt.Sprintf("%d", i))
if err := signingRootsBucket.Put(target, signingRoot[:]); err != nil {
return err
}
}
}
// Finally, we mark the migration as completed to show that we have the
// new, optimized format for attester protection in the database.
migrationBkt := tx.Bucket(migrationsBucket)
return migrationBkt.Put(migrationOptimalAttesterProtectionKey, migrationCompleted)
})
require.NoError(t, err)
},
eval: func(t *testing.T, validatorDB *Store) {
pubKeys := [][fieldparams.BLSPubkeyLength]byte{{1}, {2}}
// Next up, we validate that we have indeed rolled back our data
// into the old format for attesting history.
err := validatorDB.view(func(tx *bolt.Tx) error {
bkt := tx.Bucket(deprecatedAttestationHistoryBucket)
for _, pubKey := range pubKeys {
encodedHistoryBytes := bkt.Get(pubKey[:])
require.NotNil(t, encodedHistoryBytes)
attestingHistory := deprecatedEncodedAttestingHistory(encodedHistoryBytes)
highestEpoch, err := attestingHistory.getLatestEpochWritten()
require.NoError(t, err)
// Verify the highest epoch written is 50 from the setup stage.
require.Equal(t, primitives.Epoch(50), highestEpoch)
}
return nil
})
require.NoError(t, err)
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
validatorDB := setupDB(t, nil)
tt.setup(t, validatorDB)
require.NoError(t, validatorDB.migrateOptimalAttesterProtectionDown(t.Context()))
tt.eval(t, validatorDB)
})
}
}