PeerDAS: Implement small, unrelated changes. (#15386)
* `verifyBlobCommitmentCount`: Print the max allowed blob count in the error message.
* `TestPersist`: Use `fieldparams.RootLength` instead of `32`.
* `TestDataColumnSidecarsByRootReq_Marshal`: Remove a blank line.
* `ConvertPeerIDToNodeID`: Improve readability by using one line per field.
* `parseIndices`: Return `[]int` instead of `[]uint64`. Rationale: these indices are consumed by `func (m *SparseMerkleTrie) MerkleProof(index int) ([][]byte, error)`, which takes an `int` rather than a `uint64`, and `MerkleProof` is called in many places in the codebase. Changing the signature of `parseIndices` is therefore simpler than changing the signature of `MerkleProof` (see the sketch below).
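The last bullet's rationale can be made concrete with a small sketch. This is illustrative code only, not part of the commit: the `prover` interface and `fakeTrie` type are hypothetical stand-ins, and only the `MerkleProof(index int) ([][]byte, error)` signature is taken from the commit message.

```go
package main

import "fmt"

// prover mirrors the method signature quoted in the commit message:
// func (m *SparseMerkleTrie) MerkleProof(index int) ([][]byte, error).
type prover interface {
	MerkleProof(index int) ([][]byte, error)
}

// fakeTrie is a stand-in used only for this illustration; it is not Prysm's trie.
type fakeTrie struct{}

func (fakeTrie) MerkleProof(index int) ([][]byte, error) {
	return [][]byte{{byte(index)}}, nil
}

func main() {
	// Because parseIndices now returns []int, each index can be passed to
	// MerkleProof directly; a []uint64 return value would force a
	// uint64 -> int conversion at every call site instead.
	indices := []int{1, 2, 3}
	var p prover = fakeTrie{}
	for _, ix := range indices {
		proof, err := p.MerkleProof(ix)
		fmt.Println(ix, proof, err)
	}
}
```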
@@ -229,13 +229,16 @@ func verifyBlobCommitmentCount(slot primitives.Slot, body interfaces.ReadOnlyBea
 	if body.Version() < version.Deneb {
 		return nil
 	}
+
 	kzgs, err := body.BlobKzgCommitments()
 	if err != nil {
 		return err
 	}
-	maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(slot)
-	if len(kzgs) > maxBlobsPerBlock {
-		return fmt.Errorf("too many kzg commitments in block: %d", len(kzgs))
+
+	commitmentCount, maxBlobsPerBlock := len(kzgs), params.BeaconConfig().MaxBlobsPerBlock(slot)
+	if commitmentCount > maxBlobsPerBlock {
+		return fmt.Errorf("too many kzg commitments in block: actual count %d - max allowed %d", commitmentCount, maxBlobsPerBlock)
 	}
+
 	return nil
 }
@@ -926,8 +926,10 @@ func TestVerifyBlobCommitmentCount(t *testing.T) {
 	require.NoError(t, err)
 	require.NoError(t, blocks.VerifyBlobCommitmentCount(rb.Slot(), rb.Body()))
 
-	b = &ethpb.BeaconBlockDeneb{Body: &ethpb.BeaconBlockBodyDeneb{BlobKzgCommitments: make([][]byte, params.BeaconConfig().MaxBlobsPerBlock(rb.Slot())+1)}}
+	maxCommitmentsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(rb.Slot())
+
+	b = &ethpb.BeaconBlockDeneb{Body: &ethpb.BeaconBlockBodyDeneb{BlobKzgCommitments: make([][]byte, maxCommitmentsPerBlock+1)}}
 	rb, err = consensusblocks.NewBeaconBlock(b)
 	require.NoError(t, err)
-	require.ErrorContains(t, fmt.Sprintf("too many kzg commitments in block: %d", params.BeaconConfig().MaxBlobsPerBlock(rb.Slot())+1), blocks.VerifyBlobCommitmentCount(rb.Slot(), rb.Body()))
+	require.ErrorContains(t, fmt.Sprintf("too many kzg commitments in block: actual count %d - max allowed %d", maxCommitmentsPerBlock+1, maxCommitmentsPerBlock), blocks.VerifyBlobCommitmentCount(rb.Slot(), rb.Body()))
 }
@@ -80,7 +80,7 @@ func TestPersist(t *testing.T) {
 	require.NoError(t, err)
 	require.Equal(t, 1, len(lazilyPersistentStoreColumns.cache.entries))
 
-	key := cacheKey{slot: 0, root: [32]byte{}}
+	key := cacheKey{slot: 0, root: [fieldparams.RootLength]byte{}}
 	entry := lazilyPersistentStoreColumns.cache.entries[key]
 
 	// A call to Persist does NOT save the sidecars to disk.
@@ -221,7 +221,6 @@ func generateDataColumnIdentifiers(n int) []*eth.DataColumnsByRootIdentifier {
 
 func TestDataColumnSidecarsByRootReq_Marshal(t *testing.T) {
 	/*
-
 		SSZ encoding of DataColumnsByRootIdentifiers is tested in spectests.
 		However, encoding a list of DataColumnsByRootIdentifier is not.
 		We are testing it here.
@@ -201,6 +201,11 @@ func ConvertPeerIDToNodeID(pid peer.ID) (enode.ID, error) {
 		return [32]byte{}, errors.Wrap(err, "parse public key")
 	}
 
-	newPubkey := &ecdsa.PublicKey{Curve: gCrypto.S256(), X: pubKeyObjSecp256k1.X(), Y: pubKeyObjSecp256k1.Y()}
+	newPubkey := &ecdsa.PublicKey{
+		Curve: gCrypto.S256(),
+		X:     pubKeyObjSecp256k1.X(),
+		Y:     pubKeyObjSecp256k1.Y(),
+	}
+
 	return enode.PubkeyToIDV4(newPubkey), nil
 }
@@ -97,18 +97,19 @@ func (s *Server) Blobs(w http.ResponseWriter, r *http.Request) {
 }
 
 // parseIndices filters out invalid and duplicate blob indices
-func parseIndices(url *url.URL, s primitives.Slot) ([]uint64, error) {
+func parseIndices(url *url.URL, s primitives.Slot) ([]int, error) {
+	maxBlobsPerBlock := params.BeaconConfig().MaxBlobsPerBlock(s)
 	rawIndices := url.Query()["indices"]
-	indices := make([]uint64, 0, params.BeaconConfig().MaxBlobsPerBlock(s))
+	indices := make([]int, 0, maxBlobsPerBlock)
 	invalidIndices := make([]string, 0)
 loop:
 	for _, raw := range rawIndices {
-		ix, err := strconv.ParseUint(raw, 10, 64)
+		ix, err := strconv.Atoi(raw)
 		if err != nil {
 			invalidIndices = append(invalidIndices, raw)
 			continue
 		}
-		if ix >= uint64(params.BeaconConfig().MaxBlobsPerBlock(s)) {
+		if !(0 <= ix && ix < maxBlobsPerBlock) {
 			invalidIndices = append(invalidIndices, raw)
 			continue
 		}
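A side note on the bounds check in the hunk above: `strconv.Atoi` parses negative input successfully, whereas the previous `strconv.ParseUint` rejected it, so the new condition needs an explicit lower bound. Below is a minimal, self-contained sketch of that behaviour; the `validIndex` helper and the example maximum of 6 are illustrative assumptions, not Prysm code.

```go
package main

import (
	"fmt"
	"strconv"
)

// validIndex mimics the shape of the new check (illustrative only): Atoi
// accepts negative input, so both bounds must be enforced explicitly.
func validIndex(raw string, maxBlobsPerBlock int) (int, bool) {
	ix, err := strconv.Atoi(raw)
	if err != nil || !(0 <= ix && ix < maxBlobsPerBlock) {
		return 0, false
	}
	return ix, true
}

func main() {
	for _, raw := range []string{"2", "-1", "abc", "99"} {
		ix, ok := validIndex(raw, 6) // 6 is an arbitrary illustrative maximum
		fmt.Printf("%q -> index=%d valid=%t\n", raw, ix, ok)
	}
}
```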
@@ -520,13 +520,13 @@ func Test_parseIndices(t *testing.T) {
 	tests := []struct {
 		name    string
 		query   string
-		want    []uint64
+		want    []int
 		wantErr string
 	}{
 		{
 			name:  "happy path with duplicate indices within bound and other query parameters ignored",
 			query: "indices=1&indices=2&indices=1&indices=3&bar=bar",
-			want:  []uint64{1, 2, 3},
+			want:  []int{1, 2, 3},
 		},
 		{
 			name: "out of bounds indices throws error",
@@ -40,7 +40,7 @@ func (e BlockIdParseError) Error() string {
 // Blocker is responsible for retrieving blocks.
 type Blocker interface {
 	Block(ctx context.Context, id []byte) (interfaces.ReadOnlySignedBeaconBlock, error)
-	Blobs(ctx context.Context, id string, indices []uint64) ([]*blocks.VerifiedROBlob, *core.RpcError)
+	Blobs(ctx context.Context, id string, indices []int) ([]*blocks.VerifiedROBlob, *core.RpcError)
 }
 
 // BeaconDbBlocker is an implementation of Blocker. It retrieves blocks from the beacon chain database.
@@ -144,7 +144,7 @@ func (p *BeaconDbBlocker) Block(ctx context.Context, id []byte) (interfaces.Read
 // - block exists, has commitments, inside retention period (greater of protocol- or user-specified) serve then w/ 200 unless we hit an error reading them.
 // we are technically not supposed to import a block to forkchoice unless we have the blobs, so the nuance here is if we can't find the file and we are inside the protocol-defined retention period, then it's actually a 500.
 // - block exists, has commitments, outside retention period (greater of protocol- or user-specified) - ie just like block exists, no commitment
-func (p *BeaconDbBlocker) Blobs(ctx context.Context, id string, indices []uint64) ([]*blocks.VerifiedROBlob, *core.RpcError) {
+func (p *BeaconDbBlocker) Blobs(ctx context.Context, id string, indices []int) ([]*blocks.VerifiedROBlob, *core.RpcError) {
 	var rootSlice []byte
 	switch id {
 	case "genesis":
@@ -239,18 +239,18 @@ func (p *BeaconDbBlocker) Blobs(ctx context.Context, id string, indices []uint64
 	if len(indices) == 0 {
 		for i := range commitments {
 			if sum.HasIndex(uint64(i)) {
-				indices = append(indices, uint64(i))
+				indices = append(indices, i)
 			}
 		}
 	} else {
 		for _, ix := range indices {
-			if ix >= sum.MaxBlobsForEpoch() {
+			if uint64(ix) >= sum.MaxBlobsForEpoch() {
 				return nil, &core.RpcError{
 					Err:    fmt.Errorf("requested index %d is bigger than the maximum possible blob count %d", ix, sum.MaxBlobsForEpoch()),
 					Reason: core.BadRequest,
 				}
 			}
-			if !sum.HasIndex(ix) {
+			if !sum.HasIndex(uint64(ix)) {
 				return nil, &core.RpcError{
 					Err:    fmt.Errorf("requested index %d not found", ix),
 					Reason: core.NotFound,
@@ -261,7 +261,7 @@ func (p *BeaconDbBlocker) Blobs(ctx context.Context, id string, indices []uint64
 
 	blobs := make([]*blocks.VerifiedROBlob, len(indices))
 	for i, index := range indices {
-		vblob, err := p.BlobStorage.Get(root, index)
+		vblob, err := p.BlobStorage.Get(root, uint64(index))
 		if err != nil {
 			return nil, &core.RpcError{
 				Err: fmt.Errorf("could not retrieve blob for block root %#x at index %d", rootSlice, index),
@@ -18,7 +18,7 @@ import (
 	"github.com/OffchainLabs/prysm/v6/config/params"
 	"github.com/OffchainLabs/prysm/v6/consensus-types/blocks"
 	"github.com/OffchainLabs/prysm/v6/encoding/bytesutil"
-	ethpbalpha "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
+	ethpb "github.com/OffchainLabs/prysm/v6/proto/prysm/v1alpha1"
 	"github.com/OffchainLabs/prysm/v6/testing/assert"
 	"github.com/OffchainLabs/prysm/v6/testing/require"
 	"github.com/OffchainLabs/prysm/v6/testing/util"
@@ -51,7 +51,7 @@ func TestGetBlock(t *testing.T) {
 	b4.Block.ParentRoot = bytesutil.PadTo([]byte{8}, 32)
 	util.SaveBlock(t, ctx, beaconDB, b4)
 
-	wsb, err := blocks.NewSignedBeaconBlock(headBlock.Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block)
+	wsb, err := blocks.NewSignedBeaconBlock(headBlock.Block.(*ethpb.BeaconBlockContainer_Phase0Block).Phase0Block)
 	require.NoError(t, err)
 
 	fetcher := &BeaconDbBlocker{
@@ -60,7 +60,7 @@ func TestGetBlock(t *testing.T) {
 			DB:                  beaconDB,
 			Block:               wsb,
 			Root:                headBlock.BlockRoot,
-			FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blkContainers[64].BlockRoot},
+			FinalizedCheckPoint: &ethpb.Checkpoint{Root: blkContainers[64].BlockRoot},
 			CanonicalRoots:      canonicalRoots,
 		},
 	}
@@ -71,13 +71,13 @@ func TestGetBlock(t *testing.T) {
 	tests := []struct {
 		name    string
 		blockID []byte
-		want    *ethpbalpha.SignedBeaconBlock
+		want    *ethpb.SignedBeaconBlock
 		wantErr bool
 	}{
 		{
 			name:    "slot",
 			blockID: []byte("30"),
-			want:    blkContainers[30].Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block,
+			want:    blkContainers[30].Block.(*ethpb.BeaconBlockContainer_Phase0Block).Phase0Block,
 		},
 		{
 			name: "bad formatting",
@@ -87,7 +87,7 @@ func TestGetBlock(t *testing.T) {
 		{
 			name:    "canonical",
 			blockID: []byte("30"),
-			want:    blkContainers[30].Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block,
+			want:    blkContainers[30].Block.(*ethpb.BeaconBlockContainer_Phase0Block).Phase0Block,
 		},
 		{
 			name: "non canonical",
@@ -97,12 +97,12 @@ func TestGetBlock(t *testing.T) {
 		{
 			name:    "head",
 			blockID: []byte("head"),
-			want:    headBlock.Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block,
+			want:    headBlock.Block.(*ethpb.BeaconBlockContainer_Phase0Block).Phase0Block,
 		},
 		{
 			name:    "finalized",
 			blockID: []byte("finalized"),
-			want:    blkContainers[64].Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block,
+			want:    blkContainers[64].Block.(*ethpb.BeaconBlockContainer_Phase0Block).Phase0Block,
 		},
 		{
 			name: "genesis",
@@ -117,7 +117,7 @@ func TestGetBlock(t *testing.T) {
 		{
 			name:    "root",
 			blockID: blkContainers[20].BlockRoot,
-			want:    blkContainers[20].Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block,
+			want:    blkContainers[20].Block.(*ethpb.BeaconBlockContainer_Phase0Block).Phase0Block,
 		},
 		{
 			name: "non-existent root",
@@ -127,7 +127,7 @@ func TestGetBlock(t *testing.T) {
 		{
 			name:    "hex",
 			blockID: []byte(hexutil.Encode(blkContainers[20].BlockRoot)),
-			want:    blkContainers[20].Block.(*ethpbalpha.BeaconBlockContainer_Phase0Block).Phase0Block,
+			want:    blkContainers[20].Block.(*ethpb.BeaconBlockContainer_Phase0Block).Phase0Block,
 		},
 		{
 			name: "no block",
@@ -149,7 +149,7 @@ func TestGetBlock(t *testing.T) {
 			require.NoError(t, err)
 			pb, err := result.Proto()
 			require.NoError(t, err)
-			pbBlock, ok := pb.(*ethpbalpha.SignedBeaconBlock)
+			pbBlock, ok := pb.(*ethpb.SignedBeaconBlock)
 			require.Equal(t, true, ok)
 			if !reflect.DeepEqual(pbBlock, tt.want) {
 				t.Error("Expected blocks to equal")
@@ -218,7 +218,7 @@ func TestGetBlob(t *testing.T) {
 	})
 	t.Run("finalized", func(t *testing.T) {
 		blocker := &BeaconDbBlocker{
-			ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blockRoot[:]}},
+			ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: blockRoot[:]}},
 			GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
 				Genesis: time.Now(),
 			},
@@ -232,7 +232,7 @@ func TestGetBlob(t *testing.T) {
 	})
 	t.Run("justified", func(t *testing.T) {
 		blocker := &BeaconDbBlocker{
-			ChainInfoFetcher: &mockChain.ChainService{CurrentJustifiedCheckPoint: &ethpbalpha.Checkpoint{Root: blockRoot[:]}},
+			ChainInfoFetcher: &mockChain.ChainService{CurrentJustifiedCheckPoint: &ethpb.Checkpoint{Root: blockRoot[:]}},
 			GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
 				Genesis: time.Now(),
 			},
@@ -270,14 +270,14 @@ func TestGetBlob(t *testing.T) {
 	})
 	t.Run("one blob only", func(t *testing.T) {
 		blocker := &BeaconDbBlocker{
-			ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blockRoot[:]}},
+			ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: blockRoot[:]}},
 			GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
 				Genesis: time.Now(),
 			},
 			BeaconDB:    db,
 			BlobStorage: bs,
 		}
-		verifiedBlobs, rpcErr := blocker.Blobs(ctx, "123", []uint64{2})
+		verifiedBlobs, rpcErr := blocker.Blobs(ctx, "123", []int{2})
 		assert.Equal(t, rpcErr == nil, true)
 		require.Equal(t, 1, len(verifiedBlobs))
 		sidecar := verifiedBlobs[0].BlobSidecar
@@ -289,7 +289,7 @@ func TestGetBlob(t *testing.T) {
 	})
 	t.Run("no blobs returns an empty array", func(t *testing.T) {
 		blocker := &BeaconDbBlocker{
-			ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blockRoot[:]}},
+			ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: blockRoot[:]}},
 			GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
 				Genesis: time.Now(),
 			},
@@ -302,28 +302,28 @@ func TestGetBlob(t *testing.T) {
 	})
 	t.Run("no blob at index", func(t *testing.T) {
 		blocker := &BeaconDbBlocker{
-			ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blockRoot[:]}},
+			ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: blockRoot[:]}},
 			GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
 				Genesis: time.Now(),
 			},
 			BeaconDB:    db,
 			BlobStorage: bs,
 		}
-		noBlobIndex := uint64(len(blobs)) + 1
-		_, rpcErr := blocker.Blobs(ctx, "123", []uint64{0, noBlobIndex})
+		noBlobIndex := len(blobs) + 1
+		_, rpcErr := blocker.Blobs(ctx, "123", []int{0, noBlobIndex})
 		require.NotNil(t, rpcErr)
 		assert.Equal(t, core.ErrorReason(core.NotFound), rpcErr.Reason)
 	})
 	t.Run("index too big", func(t *testing.T) {
 		blocker := &BeaconDbBlocker{
-			ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpbalpha.Checkpoint{Root: blockRoot[:]}},
+			ChainInfoFetcher: &mockChain.ChainService{FinalizedCheckPoint: &ethpb.Checkpoint{Root: blockRoot[:]}},
 			GenesisTimeFetcher: &testutil.MockGenesisTimeFetcher{
 				Genesis: time.Now(),
 			},
 			BeaconDB:    db,
 			BlobStorage: bs,
 		}
-		_, rpcErr := blocker.Blobs(ctx, "123", []uint64{0, math.MaxUint})
+		_, rpcErr := blocker.Blobs(ctx, "123", []int{0, math.MaxInt})
 		require.NotNil(t, rpcErr)
 		assert.Equal(t, core.ErrorReason(core.BadRequest), rpcErr.Reason)
 	})
@@ -36,6 +36,6 @@ func (m *MockBlocker) Block(_ context.Context, b []byte) (interfaces.ReadOnlySig
 }
 
 // Blobs --
-func (m *MockBlocker) Blobs(_ context.Context, _ string, _ []uint64) ([]*blocks.VerifiedROBlob, *core.RpcError) {
-	panic("implement me") // lint:nopanic -- Test code.
+func (*MockBlocker) Blobs(_ context.Context, _ string, _ []int) ([]*blocks.VerifiedROBlob, *core.RpcError) {
+	return nil, &core.RpcError{}
 }
changelog/manu-peerdas-small.md (new file, +12)
@@ -0,0 +1,12 @@
+### Added
+- `verifyBlobCommitmentCount`: Print max allowed blob count in error message.
+
+
+### Ignored
+- `TestPersist`: Use `fieldparams.RootLength` instead of `32`.
+- `TestDataColumnSidecarsByRootReq_Marshal`: Remove blank line.
+- `ConvertPeerIDToNodeID`: Improve readability by using one line per field.
+
+### Changed
+- `parseIndices`: Return `[]int` instead of `[]uint64`.
+