mirror of https://github.com/OffchainLabs/prysm.git
synced 2026-01-29 15:18:10 -05:00

Compare commits: debug-stat...develop
4 commits: a7fdd11777, 919bd5d6aa, 0476eeda57, 1c65c8866a
@@ -114,17 +114,32 @@ func payloadCommittee(ctx context.Context, st state.ReadOnlyBeaconState, slot pr
 	}
 	committeesPerSlot := helpers.SlotCommitteeCount(activeCount)
-	out := make([]primitives.ValidatorIndex, 0, activeCount/uint64(params.BeaconConfig().SlotsPerEpoch))
-	for i := primitives.CommitteeIndex(0); i < primitives.CommitteeIndex(committeesPerSlot); i++ {
-		committee, err := helpers.BeaconCommitteeFromState(ctx, st, slot, i)
-		if err != nil {
-			return nil, errors.Wrapf(err, "failed to get beacon committee %d", i)
-		}
-		out = append(out, committee...)
-	}
-	return selectByBalance(ctx, st, out, seed, fieldparams.PTCSize)
+	selected := make([]primitives.ValidatorIndex, 0, fieldparams.PTCSize)
+	var i uint64
+	for uint64(len(selected)) < fieldparams.PTCSize {
+		if ctx.Err() != nil {
+			return nil, ctx.Err()
+		}
+
+		for committeeIndex := primitives.CommitteeIndex(0); committeeIndex < primitives.CommitteeIndex(committeesPerSlot); committeeIndex++ {
+			if uint64(len(selected)) >= fieldparams.PTCSize {
+				break
+			}
+
+			committee, err := helpers.BeaconCommitteeFromState(ctx, st, slot, committeeIndex)
+			if err != nil {
+				return nil, errors.Wrapf(err, "failed to get beacon committee %d", committeeIndex)
+			}
+
+			selected, i, err = selectByBalanceFill(ctx, st, committee, seed, selected, i)
+			if err != nil {
+				return nil, errors.Wrapf(err, "failed to sample beacon committee %d", committeeIndex)
+			}
+		}
+	}
+	return selected, nil
 }

 // ptcSeed computes the seed for the payload timeliness committee.
@@ -148,33 +163,39 @@ func ptcSeed(st state.ReadOnlyBeaconState, epoch primitives.Epoch, slot primitiv
 //	if compute_balance_weighted_acceptance(state, indices[next], seed, i):
 //	    selected.append(indices[next])
 //	i += 1
-func selectByBalance(ctx context.Context, st state.ReadOnlyBeaconState, candidates []primitives.ValidatorIndex, seed [32]byte, count uint64) ([]primitives.ValidatorIndex, error) {
-	if len(candidates) == 0 {
-		return nil, errors.New("no candidates for balance weighted selection")
-	}
+func selectByBalanceFill(
+	ctx context.Context,
+	st state.ReadOnlyBeaconState,
+	candidates []primitives.ValidatorIndex,
+	seed [32]byte,
+	selected []primitives.ValidatorIndex,
+	i uint64,
+) ([]primitives.ValidatorIndex, uint64, error) {
 	hashFunc := hash.CustomSHA256Hasher()
 	// Pre-allocate buffer for hash input: seed (32 bytes) + round counter (8 bytes).
 	var buf [40]byte
 	copy(buf[:], seed[:])
 	maxBalance := params.BeaconConfig().MaxEffectiveBalanceElectra
-	selected := make([]primitives.ValidatorIndex, 0, count)
-	total := uint64(len(candidates))
-	for i := uint64(0); uint64(len(selected)) < count; i++ {
+	for _, idx := range candidates {
 		if ctx.Err() != nil {
-			return nil, ctx.Err()
+			return nil, i, ctx.Err()
 		}
-		idx := candidates[i%total]
 		ok, err := acceptByBalance(st, idx, buf[:], hashFunc, maxBalance, i)
 		if err != nil {
-			return nil, err
+			return nil, i, err
 		}
 		if ok {
 			selected = append(selected, idx)
 		}
+		if uint64(len(selected)) == fieldparams.PTCSize {
+			break
+		}
+		i++
 	}
-	return selected, nil
+	return selected, i, nil
 }

 // acceptByBalance determines if a validator is accepted based on its effective balance.
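The spec pseudocode quoted above accepts a candidate in round i with probability proportional to its effective balance. As a standalone illustration, here is a minimal Go sketch of one acceptance round, assuming the per-round hash input is seed||LE64(round) (matching the 40-byte buffer above) and that the random value is taken from the first two bytes of the digest; the exact byte offset used by Prysm's acceptByBalance is not shown in this diff.

package main

import (
	"crypto/sha256"
	"encoding/binary"
	"fmt"
)

// maxRandomValue is the ceiling of the 2-byte random value space.
const maxRandomValue = 1<<16 - 1

// acceptByBalanceSketch (hypothetical name) accepts a validator with
// probability effectiveBalance/maxBalance, derived from hash(seed||round).
func acceptByBalanceSketch(seed [32]byte, round, effectiveBalance, maxBalance uint64) bool {
	var buf [40]byte
	copy(buf[:32], seed[:])
	binary.LittleEndian.PutUint64(buf[32:], round)
	digest := sha256.Sum256(buf[:])
	rnd := uint64(binary.LittleEndian.Uint16(digest[:2])) // assumed offset
	// A validator at maxBalance is always accepted; smaller balances
	// are accepted proportionally less often.
	return effectiveBalance*maxRandomValue >= maxBalance*rnd
}

func main() {
	var seed [32]byte
	seed[0] = 0x01
	fmt.Println(acceptByBalanceSketch(seed, 0, 32_000_000_000, 2_048_000_000_000))
}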
@@ -48,6 +48,7 @@ go_test(
         "@com_github_ethereum_go_ethereum//crypto:go_default_library",
         "@com_github_ethereum_go_ethereum//p2p/enode:go_default_library",
         "@org_golang_google_grpc//:go_default_library",
+        "@org_golang_google_grpc//metadata:go_default_library",
         "@org_golang_google_grpc//reflection:go_default_library",
         "@org_golang_google_protobuf//types/known/emptypb:go_default_library",
         "@org_golang_google_protobuf//types/known/timestamppb:go_default_library",
@@ -35,18 +35,19 @@ import (
 // providing RPC endpoints for verifying a beacon node's sync status, genesis and
 // version information, and services the node implements and runs.
 type Server struct {
 	LogsStreamer          logs.Streamer
 	StreamLogsBufferSize  int
 	SyncChecker           sync.Checker
 	Server                *grpc.Server
 	BeaconDB              db.ReadOnlyDatabase
 	PeersFetcher          p2p.PeersProvider
 	PeerManager           p2p.PeerManager
 	GenesisTimeFetcher    blockchain.TimeFetcher
 	GenesisFetcher        blockchain.GenesisFetcher
 	POWChainInfoFetcher   execution.ChainInfoFetcher
 	BeaconMonitoringHost  string
 	BeaconMonitoringPort  int
+	OptimisticModeFetcher blockchain.OptimisticModeFetcher
 }

 // Deprecated: The gRPC API will remain the default and fully supported through v8 (expected in 2026) but will be eventually removed in favor of REST API.
@@ -61,21 +62,28 @@ func (ns *Server) GetHealth(ctx context.Context, request *ethpb.HealthRequest) (
 	ctx, cancel := context.WithTimeout(ctx, timeoutDuration)
 	defer cancel() // Important to avoid a context leak

-	if ns.SyncChecker.Synced() {
+	// Check optimistic status - validators should not participate when optimistic
+	isOptimistic, err := ns.OptimisticModeFetcher.IsOptimistic(ctx)
+	if err != nil {
+		return &empty.Empty{}, status.Errorf(codes.Internal, "Could not check optimistic status: %v", err)
+	}
+
+	if ns.SyncChecker.Synced() && !isOptimistic {
 		return &empty.Empty{}, nil
 	}
 	if ns.SyncChecker.Syncing() || ns.SyncChecker.Initialized() {
-		if request.SyncingStatus != 0 {
-			// override the 200 success with the provided request status
-			if err := grpc.SetHeader(ctx, metadata.Pairs("x-http-code", strconv.FormatUint(request.SyncingStatus, 10))); err != nil {
-				return &empty.Empty{}, status.Errorf(codes.Internal, "Could not set custom success code header: %v", err)
-			}
-			return &empty.Empty{}, nil
-		}
+		// Set header for REST API clients (via gRPC-gateway)
 		if err := grpc.SetHeader(ctx, metadata.Pairs("x-http-code", strconv.FormatUint(http.StatusPartialContent, 10))); err != nil {
-			return &empty.Empty{}, status.Errorf(codes.Internal, "Could not set custom success code header: %v", err)
+			return &empty.Empty{}, status.Errorf(codes.Internal, "Could not set status code header: %v", err)
 		}
-		return &empty.Empty{}, nil
+		return &empty.Empty{}, status.Error(codes.Unavailable, "node is syncing")
+	}
+	if isOptimistic {
+		// Set header for REST API clients (via gRPC-gateway)
+		if err := grpc.SetHeader(ctx, metadata.Pairs("x-http-code", strconv.FormatUint(http.StatusPartialContent, 10))); err != nil {
+			return &empty.Empty{}, status.Errorf(codes.Internal, "Could not set status code header: %v", err)
+		}
+		return &empty.Empty{}, status.Error(codes.Unavailable, "node is optimistic")
 	}
 	return &empty.Empty{}, status.Errorf(codes.Unavailable, "service unavailable")
 }
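For readers unfamiliar with the x-http-code header set above: grpc-gateway does not apply it automatically; a forward-response option has to translate it into the REST status code. A sketch of the standard grpc-gateway v2 pattern for successful responses follows (Prysm's actual gateway wiring is not part of this diff; error statuses are mapped separately by the gateway's error handler):

import (
	"context"
	"net/http"
	"strconv"

	"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	"google.golang.org/protobuf/proto"
)

// httpResponseModifier promotes the x-http-code gRPC header into the
// actual HTTP status code of the REST response.
func httpResponseModifier(ctx context.Context, w http.ResponseWriter, _ proto.Message) error {
	md, ok := runtime.ServerMetadataFromContext(ctx)
	if !ok {
		return nil
	}
	if vals := md.HeaderMD.Get("x-http-code"); len(vals) > 0 {
		code, err := strconv.Atoi(vals[0])
		if err != nil {
			return err
		}
		// Remove the internal header before it leaks to the client.
		delete(md.HeaderMD, "x-http-code")
		w.Header().Del("Grpc-Metadata-X-Http-Code")
		w.WriteHeader(code)
	}
	return nil
}

// Wired into the gateway mux:
// mux := runtime.NewServeMux(runtime.WithForwardResponseOption(httpResponseModifier))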
@@ -2,6 +2,7 @@ package node

 import (
 	"errors"
+	"maps"
 	"testing"
 	"time"

@@ -21,6 +22,7 @@ import (
 	"github.com/ethereum/go-ethereum/crypto"
 	"github.com/ethereum/go-ethereum/p2p/enode"
 	"google.golang.org/grpc"
+	"google.golang.org/grpc/metadata"
 	"google.golang.org/grpc/reflection"
 	"google.golang.org/protobuf/types/known/emptypb"
 	"google.golang.org/protobuf/types/known/timestamppb"
@@ -187,32 +189,71 @@ func TestNodeServer_GetETH1ConnectionStatus(t *testing.T) {
 	assert.Equal(t, errStr, res.CurrentConnectionError)
 }

+// mockServerTransportStream implements grpc.ServerTransportStream for testing
+type mockServerTransportStream struct {
+	headers map[string][]string
+}
+
+func (m *mockServerTransportStream) Method() string { return "" }
+func (m *mockServerTransportStream) SetHeader(md metadata.MD) error {
+	maps.Copy(m.headers, md)
+	return nil
+}
+func (m *mockServerTransportStream) SendHeader(metadata.MD) error { return nil }
+func (m *mockServerTransportStream) SetTrailer(metadata.MD) error { return nil }
+
 func TestNodeServer_GetHealth(t *testing.T) {
 	tests := []struct {
 		name         string
 		input        *mockSync.Sync
-		customStatus uint64
+		isOptimistic bool
 		wantedErr    string
 	}{
 		{
-			name:  "happy path",
-			input: &mockSync.Sync{IsSyncing: false, IsSynced: true},
+			name:         "happy path - synced and not optimistic",
+			input:        &mockSync.Sync{IsSyncing: false, IsSynced: true},
+			isOptimistic: false,
 		},
 		{
-			name:      "syncing",
-			input:     &mockSync.Sync{IsSyncing: false},
-			wantedErr: "service unavailable",
+			name:         "returns error when not synced and not syncing",
+			input:        &mockSync.Sync{IsSyncing: false, IsSynced: false},
+			isOptimistic: false,
+			wantedErr:    "service unavailable",
+		},
+		{
+			name:         "returns error when syncing",
+			input:        &mockSync.Sync{IsSyncing: true, IsSynced: false},
+			isOptimistic: false,
+			wantedErr:    "node is syncing",
+		},
+		{
+			name:         "returns error when synced but optimistic",
+			input:        &mockSync.Sync{IsSyncing: false, IsSynced: true},
+			isOptimistic: true,
+			wantedErr:    "node is optimistic",
+		},
+		{
+			name:         "returns error when syncing and optimistic",
+			input:        &mockSync.Sync{IsSyncing: true, IsSynced: false},
+			isOptimistic: true,
+			wantedErr:    "node is syncing",
 		},
 	}
 	for _, tt := range tests {
 		t.Run(tt.name, func(t *testing.T) {
 			server := grpc.NewServer()
 			ns := &Server{
 				SyncChecker:           tt.input,
+				OptimisticModeFetcher: &mock.ChainService{Optimistic: tt.isOptimistic},
 			}
 			ethpb.RegisterNodeServer(server, ns)
 			reflection.Register(server)
-			_, err := ns.GetHealth(t.Context(), &ethpb.HealthRequest{SyncingStatus: tt.customStatus})
+
+			// Create context with mock transport stream so grpc.SetHeader works
+			stream := &mockServerTransportStream{headers: make(map[string][]string)}
+			ctx := grpc.NewContextWithServerTransportStream(t.Context(), stream)
+
+			_, err := ns.GetHealth(ctx, &ethpb.HealthRequest{})
 			if tt.wantedErr == "" {
 				require.NoError(t, err)
 				return
@@ -259,18 +259,19 @@ func NewService(ctx context.Context, cfg *Config) *Service {
 	}
 	s.validatorServer = validatorServer
 	nodeServer := &nodev1alpha1.Server{
 		LogsStreamer:          logs.NewStreamServer(),
 		StreamLogsBufferSize:  1000, // Enough to handle bursts of beacon node logs for gRPC streaming.
 		BeaconDB:              s.cfg.BeaconDB,
 		Server:                s.grpcServer,
 		SyncChecker:           s.cfg.SyncService,
 		GenesisTimeFetcher:    s.cfg.GenesisTimeFetcher,
 		PeersFetcher:          s.cfg.PeersFetcher,
 		PeerManager:           s.cfg.PeerManager,
 		GenesisFetcher:        s.cfg.GenesisFetcher,
 		POWChainInfoFetcher:   s.cfg.ExecutionChainInfoFetcher,
 		BeaconMonitoringHost:  s.cfg.BeaconMonitoringHost,
 		BeaconMonitoringPort:  s.cfg.BeaconMonitoringPort,
+		OptimisticModeFetcher: s.cfg.OptimisticModeFetcher,
 	}
 	beaconChainServer := &beaconv1alpha1.Server{
 		Ctx: s.ctx,
changelog/farazdagi_fix-hashtree-darwin-amd64.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Fixed

- Fix Bazel build failure on macOS x86_64 (darwin_amd64) (adds missing assembly stub to hashtree patch).
@@ -0,0 +1,6 @@
### Added

- Added a new proofCollector type to ssz-query

### Ignored

- Added tests covering production of a Merkle proof from a Phase0 beacon state, benchmarked against a real Hoodi beacon state (Fulu version)
changelog/james-prysm_update-health-endpoint.md (new file, 3 lines)
@@ -0,0 +1,3 @@
### Changed

- The gRPC health endpoint now returns an error when the node is syncing or optimistic, signaling that it is unavailable.
changelog/terencechain_gloas-ptc-sampling.md (new file, 2 lines)
@@ -0,0 +1,2 @@
### Changed

- Sample PTC per committee to reduce allocations.
@@ -163,3 +163,18 @@ func Uint256ToSSZBytes(num string) ([]byte, error) {
 	}
 	return PadTo(ReverseByteOrder(uint256.Bytes()), 32), nil
 }
+
+// PutLittleEndian writes an unsigned integer value in little-endian format.
+// Supports sizes 1, 2, 4, or 8 bytes for uint8/16/32/64 respectively.
+func PutLittleEndian(dst []byte, val uint64, size int) {
+	switch size {
+	case 1:
+		dst[0] = byte(val)
+	case 2:
+		binary.LittleEndian.PutUint16(dst, uint16(val))
+	case 4:
+		binary.LittleEndian.PutUint32(dst, uint32(val))
+	case 8:
+		binary.LittleEndian.PutUint64(dst, val)
+	}
+}
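A quick usage sketch of the new helper, packing four uint16 values into one contiguous buffer; the loop mirrors how merkleizeVectorBody (in the new proof_collector.go below) serializes basic vector elements:

buf := make([]byte, 8)
for i, v := range []uint64{1, 2, 3, 4} {
	bytesutil.PutLittleEndian(buf[i*2:(i+1)*2], v, 2)
}
// buf == []byte{0x01, 0x00, 0x02, 0x00, 0x03, 0x00, 0x04, 0x00}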
@@ -9,7 +9,9 @@ go_library(
         "container.go",
         "generalized_index.go",
         "list.go",
+        "merkle_proof.go",
         "path.go",
+        "proof_collector.go",
         "query.go",
         "ssz_info.go",
         "ssz_object.go",
@@ -20,7 +22,12 @@ go_library(
     importpath = "github.com/OffchainLabs/prysm/v7/encoding/ssz/query",
     visibility = ["//visibility:public"],
     deps = [
+        "//container/trie:go_default_library",
+        "//crypto/hash/htr:go_default_library",
+        "//encoding/bytesutil:go_default_library",
         "//encoding/ssz:go_default_library",
+        "//math:go_default_library",
+        "@com_github_prysmaticlabs_fastssz//:go_default_library",
         "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
     ],
 )
@@ -29,15 +36,24 @@ go_test(
     name = "go_default_test",
     srcs = [
         "generalized_index_test.go",
+        "merkle_proof_test.go",
         "path_test.go",
+        "proof_collector_test.go",
         "query_test.go",
         "tag_parser_test.go",
     ],
+    embed = [":go_default_library"],
     deps = [
-        ":go_default_library",
+        "//beacon-chain/state/stateutil:go_default_library",
+        "//consensus-types/blocks:go_default_library",
+        "//consensus-types/primitives:go_default_library",
+        "//encoding/ssz:go_default_library",
         "//encoding/ssz/query/testutil:go_default_library",
+        "//proto/prysm/v1alpha1:go_default_library",
         "//proto/ssz_query/testing:go_default_library",
         "//testing/require:go_default_library",
+        "//testing/util:go_default_library",
+        "@com_github_prysmaticlabs_fastssz//:go_default_library",
         "@com_github_prysmaticlabs_go_bitfield//:go_default_library",
     ],
 )
encoding/ssz/query/merkle_proof.go (new file, 34 lines)
@@ -0,0 +1,34 @@
package query

import (
	"fmt"
	"reflect"

	fastssz "github.com/prysmaticlabs/fastssz"
)

// Prove is the entrypoint to generate an SSZ Merkle proof for the given generalized index.
// Parameters:
//   - gindex: the generalized index of the node to prove inclusion for.
// Returns:
//   - fastssz.Proof: the Merkle proof containing the leaf, index, and sibling hashes.
//   - error: any error encountered during proof generation.
func (info *SszInfo) Prove(gindex uint64) (*fastssz.Proof, error) {
	if info == nil {
		return nil, fmt.Errorf("nil SszInfo")
	}

	collector := newProofCollector()
	collector.addTarget(gindex)

	// info.source is guaranteed to be valid and dereferenced by AnalyzeObject
	v := reflect.ValueOf(info.source).Elem()

	// Start the merkleization and proof collection process.
	// In SSZ generalized indices, the root is always at index 1.
	if _, err := collector.merkleize(info, v, 1); err != nil {
		return nil, err
	}

	return collector.toProof()
}
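End-to-end, the new entrypoint composes with the existing query helpers; a condensed sketch (error handling elided) of what the proveAndVerify helper in the test file below does:

info, _ := query.AnalyzeObject(obj)                    // obj implements query.SSZObject
path, _ := query.ParsePath(".slot")
gi, _ := query.GetGeneralizedIndexFromPath(info, path)
proof, _ := info.Prove(gi)                             // fastssz.Proof with Leaf, Index, Hashes
root, _ := obj.HashTreeRoot()
ok, _ := fastssz.VerifyProof(root[:], proof)           // ok == true for a valid proof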
encoding/ssz/query/merkle_proof_test.go (new file, 163 lines)
@@ -0,0 +1,163 @@
package query_test

import (
	"testing"

	"github.com/OffchainLabs/go-bitfield"
	"github.com/OffchainLabs/prysm/v7/consensus-types/blocks"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	"github.com/OffchainLabs/prysm/v7/encoding/ssz/query"
	eth "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	"github.com/OffchainLabs/prysm/v7/testing/require"
	"github.com/OffchainLabs/prysm/v7/testing/util"
	ssz "github.com/prysmaticlabs/fastssz"
)

func TestProve_FixedTestContainer(t *testing.T) {
	obj := createFixedTestContainer()

	tests := []string{
		".field_uint32",
		".nested.value2",
		".vector_field[3]",
		".bitvector64_field",
		".trailing_field",
	}

	for _, tc := range tests {
		t.Run(tc, func(t *testing.T) {
			proveAndVerify(t, obj, tc)
		})
	}
}

func TestProve_VariableTestContainer(t *testing.T) {
	obj := createVariableTestContainer()

	tests := []string{
		".leading_field",
		".field_list_uint64[2]",
		"len(field_list_uint64)",
		".nested.nested_list_field[1]",
		".variable_container_list[0].inner_1.field_list_uint64[1]",
	}

	for _, tc := range tests {
		t.Run(tc, func(t *testing.T) {
			proveAndVerify(t, obj, tc)
		})
	}
}

func TestProve_BeaconBlock(t *testing.T) {
	randaoReveal := make([]byte, 96)
	for i := range randaoReveal {
		randaoReveal[i] = 0x42
	}
	root32 := make([]byte, 32)
	for i := range root32 {
		root32[i] = 0x24
	}
	sig := make([]byte, 96)
	for i := range sig {
		sig[i] = 0x99
	}

	att := &eth.Attestation{
		AggregationBits: bitfield.Bitlist{0x01},
		Data: &eth.AttestationData{
			Slot:            1,
			CommitteeIndex:  1,
			BeaconBlockRoot: root32,
			Source: &eth.Checkpoint{
				Epoch: 1,
				Root:  root32,
			},
			Target: &eth.Checkpoint{
				Epoch: 1,
				Root:  root32,
			},
		},
		Signature: sig,
	}

	b := util.NewBeaconBlock()
	b.Block.Slot = 123
	b.Block.Body.RandaoReveal = randaoReveal
	b.Block.Body.Attestations = []*eth.Attestation{att}

	sb, err := blocks.NewSignedBeaconBlock(b)
	require.NoError(t, err)

	protoBlock, err := sb.Block().Proto()
	require.NoError(t, err)

	obj, ok := protoBlock.(query.SSZObject)
	require.Equal(t, true, ok, "block proto does not implement query.SSZObject")

	tests := []string{
		".slot",
		".body.randao_reveal",
		".body.attestations[0].data.slot",
		"len(body.attestations)",
	}

	for _, tc := range tests {
		t.Run(tc, func(t *testing.T) {
			proveAndVerify(t, obj, tc)
		})
	}
}

func TestProve_BeaconState(t *testing.T) {
	st, _ := util.DeterministicGenesisState(t, 16)
	require.NoError(t, st.SetSlot(primitives.Slot(42)))

	sszObj, ok := st.ToProtoUnsafe().(query.SSZObject)
	require.Equal(t, true, ok, "state proto does not implement query.SSZObject")

	tests := []string{
		".slot",
		".latest_block_header",
		".validators[0].effective_balance",
		"len(validators)",
	}

	for _, tc := range tests {
		t.Run(tc, func(t *testing.T) {
			proveAndVerify(t, sszObj, tc)
		})
	}
}

// proveAndVerify is a helper that analyzes an object, generates a merkle proof for the given path,
// and verifies the proof against the object's root.
func proveAndVerify(t *testing.T, obj query.SSZObject, pathStr string) {
	t.Helper()

	info, err := query.AnalyzeObject(obj)
	require.NoError(t, err)

	path, err := query.ParsePath(pathStr)
	require.NoError(t, err)

	gi, err := query.GetGeneralizedIndexFromPath(info, path)
	require.NoError(t, err)

	proof, err := info.Prove(gi)
	require.NoError(t, err)
	require.Equal(t, int(gi), proof.Index)

	root, err := obj.HashTreeRoot()
	require.NoError(t, err)

	ok, err := ssz.VerifyProof(root[:], proof)
	require.NoError(t, err)
	require.Equal(t, true, ok, "merkle proof verification failed")

	require.Equal(t, 32, len(proof.Leaf))
	for i, h := range proof.Hashes {
		require.Equal(t, 32, len(h), "proof hash %d is not 32 bytes", i)
	}
}
encoding/ssz/query/proof_collector.go (new file, 672 lines)
@@ -0,0 +1,672 @@
package query

import (
	"encoding/binary"
	"errors"
	"fmt"
	"math/bits"
	"reflect"
	"runtime"
	"slices"
	"sync"

	"github.com/OffchainLabs/go-bitfield"
	"github.com/OffchainLabs/prysm/v7/container/trie"
	"github.com/OffchainLabs/prysm/v7/crypto/hash/htr"
	"github.com/OffchainLabs/prysm/v7/encoding/bytesutil"
	ssz "github.com/OffchainLabs/prysm/v7/encoding/ssz"
	"github.com/OffchainLabs/prysm/v7/math"
	fastssz "github.com/prysmaticlabs/fastssz"
)

// proofCollector collects sibling hashes and leaves needed for Merkle proofs.
//
// Multiproof-ready design:
//   - requiredSiblings/requiredLeaves store which gindices we want to collect (registered before merkleization).
//   - siblings/leaves store the actual collected hashes.
//
// Concurrency:
//   - required* maps are read-only during merkleization.
//   - siblings/leaves writes are protected by mutex.
type proofCollector struct {
	sync.Mutex

	// Required gindices (registered before merkleization)
	requiredSiblings map[uint64]struct{}
	requiredLeaves   map[uint64]struct{}

	// Collected hashes
	siblings map[uint64][32]byte
	leaves   map[uint64][32]byte
}

func newProofCollector() *proofCollector {
	return &proofCollector{
		requiredSiblings: make(map[uint64]struct{}),
		requiredLeaves:   make(map[uint64]struct{}),
		siblings:         make(map[uint64][32]byte),
		leaves:           make(map[uint64][32]byte),
	}
}

func (pc *proofCollector) reset() {
	pc.Lock()
	defer pc.Unlock()

	pc.requiredSiblings = make(map[uint64]struct{})
	pc.requiredLeaves = make(map[uint64]struct{})
	pc.siblings = make(map[uint64][32]byte)
	pc.leaves = make(map[uint64][32]byte)
}

// addTarget registers the target leaf and its required sibling nodes for proof construction.
// Registration should happen before merkleization begins.
func (pc *proofCollector) addTarget(gindex uint64) {
	pc.Lock()
	defer pc.Unlock()

	pc.requiredLeaves[gindex] = struct{}{}

	// Walk from the target leaf up to (but not including) the root (gindex=1).
	// At each step, register the sibling node required to prove inclusion.
	nodeGindex := gindex
	for nodeGindex > 1 {
		siblingGindex := nodeGindex ^ 1 // flip the last bit: left<->right sibling
		pc.requiredSiblings[siblingGindex] = struct{}{}

		// Move to parent
		nodeGindex /= 2
	}
}
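A worked example of the registration walk: addTarget(13) marks leaf 13 as required and registers siblings 12 (13^1), then 7 (parent 6, 6^1), then 2 (parent 3, 3^1); the walk stops at the root (gindex 1). toProof below emits exactly those sibling hashes in the same bottom-up order, so a proof for gindex g always contains bits.Len64(g)-1 hashes.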
// toProof converts the collected siblings and leaves into a fastssz.Proof structure.
// Current behavior expects a single target leaf (single proof).
func (pc *proofCollector) toProof() (*fastssz.Proof, error) {
	pc.Lock()
	defer pc.Unlock()

	proof := &fastssz.Proof{}
	if len(pc.leaves) == 0 {
		return nil, errors.New("no leaves collected: add target leaves before merkleization")
	}

	leafGindices := make([]uint64, 0, len(pc.leaves))
	for g := range pc.leaves {
		leafGindices = append(leafGindices, g)
	}
	slices.Sort(leafGindices)

	// single proof resides in leafGindices[0]
	targetGindex := leafGindices[0]
	proofIndex, err := math.Int(targetGindex)
	if err != nil {
		return nil, fmt.Errorf("gindex %d overflows int: %w", targetGindex, err)
	}
	proof.Index = proofIndex

	// store the leaf
	leaf := pc.leaves[targetGindex]
	leafBuf := make([]byte, 32)
	copy(leafBuf, leaf[:])
	proof.Leaf = leafBuf

	// Walk from target up to root, collecting siblings.
	steps := bits.Len64(targetGindex) - 1
	proof.Hashes = make([][]byte, 0, steps)

	for targetGindex > 1 {
		sib := targetGindex ^ 1
		h, ok := pc.siblings[sib]
		if !ok {
			return nil, fmt.Errorf("missing sibling hash for gindex %d", sib)
		}
		proof.Hashes = append(proof.Hashes, h[:])
		targetGindex /= 2
	}

	return proof, nil
}

// collectLeaf checks if the given gindex is a required leaf for the proof,
// and if so, stores the provided leaf hash in the collector.
func (pc *proofCollector) collectLeaf(gindex uint64, leaf [32]byte) {
	if _, ok := pc.requiredLeaves[gindex]; !ok {
		return
	}
	pc.Lock()
	pc.leaves[gindex] = leaf
	pc.Unlock()
}

// collectSibling stores the hash for a sibling node identified by gindex.
// It only stores the hash if gindex was pre-registered via addTarget (present in requiredSiblings).
// Writes to the collected siblings map are protected by the collector mutex.
func (pc *proofCollector) collectSibling(gindex uint64, hash [32]byte) {
	if _, ok := pc.requiredSiblings[gindex]; !ok {
		return
	}
	pc.Lock()
	pc.siblings[gindex] = hash
	pc.Unlock()
}

// Merkleizers and proof collection methods

// merkleize recursively traverses an SSZ info and computes the Merkle root of the subtree.
//
// Proof collection:
//   - During traversal it calls collectLeaf/collectSibling with the SSZ generalized indices (gindices)
//     of visited nodes.
//   - The collector only stores hashes for gindices that were pre-registered via addTarget
//     (requiredLeaves/requiredSiblings). This makes the traversal multiproof-ready: you can register
//     multiple targets before calling merkleize.
//
// SSZ types handled: basic types, containers, lists, vectors, bitlists, and bitvectors.
//
// Parameters:
//   - info: SSZ type metadata for the current value.
//   - v: reflect.Value of the current value.
//   - currentGindex: generalized index of the current subtree root.
//
// Returns:
//   - [32]byte: Merkle root of the current subtree.
//   - error: any error encountered during traversal/merkleization.
func (pc *proofCollector) merkleize(info *SszInfo, v reflect.Value, currentGindex uint64) ([32]byte, error) {
	if info.sszType.isBasic() {
		return pc.merkleizeBasicType(info.sszType, v, currentGindex)
	}
	switch info.sszType {
	case Container:
		return pc.merkleizeContainer(info, v, currentGindex)
	case List:
		return pc.merkleizeList(info, v, currentGindex)
	case Vector:
		return pc.merkleizeVector(info, v, currentGindex)
	case Bitlist:
		return pc.merkleizeBitlist(info, v, currentGindex)
	case Bitvector:
		return pc.merkleizeBitvector(info, v, currentGindex)
	default:
		return [32]byte{}, fmt.Errorf("unsupported SSZ type: %v", info.sszType)
	}
}

// merkleizeBasicType serializes a basic SSZ value into a 32-byte leaf chunk (little-endian, zero-padded).
//
// Proof collection:
//   - It calls collectLeaf(currentGindex, leaf) and stores the leaf if currentGindex was pre-registered via addTarget.
//
// Parameters:
//   - t: the SSZType (basic).
//   - v: the reflect.Value of the basic value.
//   - currentGindex: the generalized index (gindex) of this leaf.
//
// Returns:
//   - [32]byte: the 32-byte SSZ leaf chunk.
//   - error: if the SSZType is not a supported basic type.
func (pc *proofCollector) merkleizeBasicType(t SSZType, v reflect.Value, currentGindex uint64) ([32]byte, error) {
	var leaf [32]byte

	// Serialize the value into a 32-byte chunk (little-endian, zero-padded)
	switch t {
	case Uint8:
		leaf[0] = uint8(v.Uint())
	case Uint16:
		binary.LittleEndian.PutUint16(leaf[:2], uint16(v.Uint()))
	case Uint32:
		binary.LittleEndian.PutUint32(leaf[:4], uint32(v.Uint()))
	case Uint64:
		binary.LittleEndian.PutUint64(leaf[:8], v.Uint())
	case Boolean:
		if v.Bool() {
			leaf[0] = 1
		}
	default:
		return [32]byte{}, fmt.Errorf("unexpected basic type: %v", t)
	}

	pc.collectLeaf(currentGindex, leaf)

	return leaf, nil
}

// merkleizeContainer computes the Merkle root of an SSZ container by:
//  1. Merkleizing each field into a 32-byte subtree root
//  2. Merkleizing the field roots into the container root (padding to the next power-of-2)
//
// Generalized indices (gindices): depth = ssz.Depth(uint64(N)) and field i has gindex = (currentGindex << depth) + uint64(i).
// Proof collection: merkleize() computes each field root, merkleizeVectorAndCollect collects required siblings, and collectLeaf stores the container root if registered.
//
// Parameters:
//   - info: SSZ type metadata for the container.
//   - v: reflect.Value of the container value.
//   - currentGindex: generalized index (gindex) of the container root.
//
// Returns:
//   - [32]byte: Merkle root of the container.
//   - error: any error encountered while merkleizing fields.
func (pc *proofCollector) merkleizeContainer(info *SszInfo, v reflect.Value, currentGindex uint64) ([32]byte, error) {
	// If the container root itself is the target, compute directly and return early.
	// This avoids full subtree merkleization when we only need the root.
	if _, ok := pc.requiredLeaves[currentGindex]; ok {
		root, err := info.HashTreeRoot()
		if err != nil {
			return [32]byte{}, err
		}
		pc.collectLeaf(currentGindex, root)
		return root, nil
	}

	ci, err := info.ContainerInfo()
	if err != nil {
		return [32]byte{}, err
	}

	v = dereferencePointer(v)

	// Calculate depth: how many levels from container root to field leaves
	numFields := len(ci.order)
	depth := ssz.Depth(uint64(numFields))

	// Step 1: Compute HTR for each subtree (field)
	fieldRoots := make([][32]byte, numFields)

	for i, name := range ci.order {
		fieldInfo := ci.fields[name]
		fieldVal := v.FieldByName(fieldInfo.goFieldName)

		// Field i's gindex: shift currentGindex left by depth, then OR with field index
		fieldGindex := currentGindex<<depth + uint64(i)

		htr, err := pc.merkleize(fieldInfo.sszInfo, fieldVal, fieldGindex)
		if err != nil {
			return [32]byte{}, fmt.Errorf("field %s: %w", name, err)
		}
		fieldRoots[i] = htr
	}

	// Step 2: Merkleize the field hashes into the container root,
	// collecting sibling hashes if target is within this subtree
	root := pc.merkleizeVectorAndCollect(fieldRoots, currentGindex, uint64(depth))

	return root, nil
}

// merkleizeVectorBody computes the Merkle root of the "data" subtree for vector-like SSZ types
// (vectors and the data-part of lists/bitlists).
//
// Generalized indices (gindices): depth = ssz.Depth(limit); leafBase = subtreeRootGindex << depth; element/chunk i gindex = leafBase + uint64(i).
// Proof collection: merkleize() is called for composite elements; merkleizeVectorAndCollect collects required siblings at this layer.
// Padding: merkleizeVectorAndCollect uses trie.ZeroHashes as needed.
//
// Parameters:
//   - elemInfo: SSZ type metadata for the element.
//   - v: reflect.Value of the vector/list data.
//   - length: number of actual elements present.
//   - limit: virtual leaf capacity used for padding/Depth (fixed length for vectors, limit for lists).
//   - subtreeRootGindex: gindex of the data subtree root.
//
// Returns:
//   - [32]byte: Merkle root of the data subtree.
//   - error: any error encountered while merkleizing composite elements.
func (pc *proofCollector) merkleizeVectorBody(elemInfo *SszInfo, v reflect.Value, length int, limit uint64, subtreeRootGindex uint64) ([32]byte, error) {
	depth := uint64(ssz.Depth(limit))

	var chunks [][32]byte
	if elemInfo.sszType.isBasic() {
		// Serialize basic elements and pack into 32-byte chunks using ssz.PackByChunk.
		elemSize, err := math.Int(itemLength(elemInfo))
		if err != nil {
			return [32]byte{}, fmt.Errorf("element size %d overflows int: %w", itemLength(elemInfo), err)
		}
		serialized := make([][]byte, length)
		// Single contiguous allocation for all element data
		allData := make([]byte, length*elemSize)
		for i := range length {
			buf := allData[i*elemSize : (i+1)*elemSize]
			elem := v.Index(i)
			if elemInfo.sszType == Boolean && elem.Bool() {
				buf[0] = 1
			} else {
				bytesutil.PutLittleEndian(buf, elem.Uint(), elemSize)
			}
			serialized[i] = buf
		}
		chunks, err = ssz.PackByChunk(serialized)
		if err != nil {
			return [32]byte{}, err
		}
	} else {
		// Composite elements: compute each element root (no padding here; merkleizeVectorAndCollect pads).
		chunks = make([][32]byte, length)

		// Fall back to per-element merkleization with proper gindices for proof collection.
		// Parallel execution
		workerCount := min(runtime.GOMAXPROCS(0), length)

		jobs := make(chan int, workerCount*16)
		errCh := make(chan error, 1) // only need the first error
		stopCh := make(chan struct{})
		var stopOnce sync.Once
		var wg sync.WaitGroup

		worker := func() {
			defer wg.Done()
			for idx := range jobs {
				select {
				case <-stopCh:
					return
				default:
				}

				elemGindex := subtreeRootGindex<<depth + uint64(idx)
				htr, err := pc.merkleize(elemInfo, v.Index(idx), elemGindex)
				if err != nil {
					stopOnce.Do(func() { close(stopCh) })
					select {
					case errCh <- fmt.Errorf("index %d: %w", idx, err):
					default:
					}
					return
				}
				chunks[idx] = htr
			}
		}

		wg.Add(workerCount)
		for range workerCount {
			go worker()
		}

		// Enqueue jobs; stop early if any worker reports an error.
	enqueue:
		for i := range length {
			select {
			case <-stopCh:
				break enqueue
			case jobs <- i:
			}
		}
		close(jobs)

		wg.Wait()

		select {
		case err := <-errCh:
			return [32]byte{}, err
		default:
		}
	}

	root := pc.merkleizeVectorAndCollect(chunks, subtreeRootGindex, depth)
	return root, nil
}

// merkleizeVector computes the Merkle root of an SSZ vector (fixed-length).
//
// Generalized indices (gindices): currentGindex is the gindex of the vector root; element/chunk gindices are derived
// inside merkleizeVectorBody using leafBase = currentGindex << ssz.Depth(leaves).
//
// Proof collection: merkleizeVectorBody performs element/chunk merkleization and collects required siblings at the
// vector layer; collectLeaf stores the vector root if currentGindex was registered via addTarget.
//
// Parameters:
//   - info: SSZ type metadata for the vector.
//   - v: reflect.Value of the vector value.
//   - currentGindex: generalized index (gindex) of the vector root.
//
// Returns:
//   - [32]byte: Merkle root of the vector.
//   - error: any error encountered while merkleizing composite elements.
func (pc *proofCollector) merkleizeVector(info *SszInfo, v reflect.Value, currentGindex uint64) ([32]byte, error) {
	vi, err := info.VectorInfo()
	if err != nil {
		return [32]byte{}, err
	}

	length, err := math.Int(vi.Length())
	if err != nil {
		return [32]byte{}, fmt.Errorf("vector length %d overflows int: %w", vi.Length(), err)
	}
	elemInfo := vi.element

	// Determine the virtual leaf capacity for the vector.
	leaves, err := getChunkCount(info)
	if err != nil {
		return [32]byte{}, err
	}

	root, err := pc.merkleizeVectorBody(elemInfo, v, length, leaves, currentGindex)
	if err != nil {
		return [32]byte{}, err
	}

	// If the vector root itself is the target
	pc.collectLeaf(currentGindex, root)

	return root, nil
}

// merkleizeList computes the Merkle root of an SSZ list by merkleizing its data subtree and mixing in the length.
//
// Generalized indices (gindices): dataRoot is the left child of the list root (dataRootGindex = currentGindex*2); the length mixin is the right child (currentGindex*2+1).
// Proof collection: merkleizeVectorBody computes the data root (collecting required siblings in the data subtree), and mixinLengthAndCollect collects required siblings at the length-mixin level; collectLeaf stores the list root if registered.
//
// Parameters:
//   - info: SSZ type metadata for the list.
//   - v: reflect.Value of the list value.
//   - currentGindex: generalized index (gindex) of the list root.
//
// Returns:
//   - [32]byte: Merkle root of the list.
//   - error: any error encountered while merkleizing the data subtree.
func (pc *proofCollector) merkleizeList(info *SszInfo, v reflect.Value, currentGindex uint64) ([32]byte, error) {
	li, err := info.ListInfo()
	if err != nil {
		return [32]byte{}, err
	}

	length := v.Len()
	elemInfo := li.element

	chunks := make([][32]byte, 2)
	// Compute the length hash (little-endian uint256)
	binary.LittleEndian.PutUint64(chunks[1][:8], uint64(length))

	// Data subtree root is the left child of the list root.
	dataRootGindex := currentGindex * 2

	// Compute virtual leaf capacity for the data subtree.
	leaves, err := getChunkCount(info)
	if err != nil {
		return [32]byte{}, err
	}

	chunks[0], err = pc.merkleizeVectorBody(elemInfo, v, length, leaves, dataRootGindex)
	if err != nil {
		return [32]byte{}, err
	}

	// Handle the length mixin level (and proof bookkeeping at this level).
	// Compute the final list root: hash(dataRoot || lengthHash)
	root := pc.mixinLengthAndCollect(currentGindex, chunks)

	// If the list root itself is the target
	pc.collectLeaf(currentGindex, root)

	return root, nil
}

// merkleizeBitvectorBody computes the Merkle root of a bitvector-like byte sequence by packing it into 32-byte chunks
// and merkleizing those chunks as a fixed-capacity vector (padding with trie.ZeroHashes as needed).
//
// Generalized indices (gindices): depth = ssz.Depth(chunkLimit); leafBase = subtreeRootGindex << depth; chunk i uses gindex = leafBase + uint64(i).
// Proof collection: merkleizeVectorAndCollect collects required sibling hashes at the chunk-merkleization layer.
//
// Parameters:
//   - data: raw byte sequence representing the bitvector payload.
//   - chunkLimit: fixed/limit number of 32-byte chunks (used for padding/Depth).
//   - subtreeRootGindex: gindex of the bitvector data subtree root.
//
// Returns:
//   - [32]byte: Merkle root of the bitvector data subtree.
//   - error: any error encountered while packing data into chunks.
func (pc *proofCollector) merkleizeBitvectorBody(data []byte, chunkLimit uint64, subtreeRootGindex uint64) ([32]byte, error) {
	depth := ssz.Depth(chunkLimit)
	chunks, err := ssz.PackByChunk([][]byte{data})
	if err != nil {
		return [32]byte{}, err
	}
	root := pc.merkleizeVectorAndCollect(chunks, subtreeRootGindex, uint64(depth))
	return root, nil
}

// merkleizeBitvector computes the Merkle root of a fixed-length SSZ bitvector and collects proof nodes for targets.
//
// Parameters:
//   - info: SSZ type metadata for the bitvector.
//   - v: reflect.Value of the bitvector value.
//   - currentGindex: generalized index (gindex) of the bitvector root.
//
// Returns:
//   - [32]byte: Merkle root of the bitvector.
//   - error: any error encountered during packing or merkleization.
func (pc *proofCollector) merkleizeBitvector(info *SszInfo, v reflect.Value, currentGindex uint64) ([32]byte, error) {
	bitvectorBytes := v.Bytes()
	if len(bitvectorBytes) == 0 {
		return [32]byte{}, fmt.Errorf("bitvector field is uninitialized (nil or empty slice)")
	}

	// Compute virtual leaf capacity for the bitvector.
	numChunks, err := getChunkCount(info)
	if err != nil {
		return [32]byte{}, err
	}

	root, err := pc.merkleizeBitvectorBody(bitvectorBytes, numChunks, currentGindex)
	if err != nil {
		return [32]byte{}, err
	}

	pc.collectLeaf(currentGindex, root)

	return root, nil
}

// merkleizeBitlist computes the Merkle root of an SSZ bitlist by merkleizing its data chunks and mixing in the bit length.
//
// Generalized indices (gindices): dataRoot is the left child (dataRootGindex = currentGindex*2) and the length mixin is the right child (currentGindex*2+1).
// Proof collection: merkleizeBitvectorBody computes the data root (collecting required siblings under dataRootGindex), and mixinLengthAndCollect collects required siblings at the length-mixin level; collectLeaf stores the bitlist root if registered.
//
// Parameters:
//   - info: SSZ type metadata for the bitlist.
//   - v: reflect.Value of the bitlist value.
//   - currentGindex: generalized index (gindex) of the bitlist root.
//
// Returns:
//   - [32]byte: Merkle root of the bitlist.
//   - error: any error encountered while merkleizing the data subtree.
func (pc *proofCollector) merkleizeBitlist(info *SszInfo, v reflect.Value, currentGindex uint64) ([32]byte, error) {
	bi, err := info.BitlistInfo()
	if err != nil {
		return [32]byte{}, err
	}

	bitlistBytes := v.Bytes()

	// Use go-bitfield to get bytes with termination bit cleared
	bl := bitfield.Bitlist(bitlistBytes)
	data := bl.BytesNoTrim()

	// Get the bit length from bitlistInfo
	bitLength := bi.Length()

	// Get the chunk limit from getChunkCount
	limitChunks, err := getChunkCount(info)
	if err != nil {
		return [32]byte{}, err
	}

	chunks := make([][32]byte, 2)
	// Compute the length hash (little-endian uint256)
	binary.LittleEndian.PutUint64(chunks[1][:8], uint64(bitLength))

	dataRootGindex := currentGindex * 2
	chunks[0], err = pc.merkleizeBitvectorBody(data, limitChunks, dataRootGindex)
	if err != nil {
		return [32]byte{}, err
	}

	// Handle the length mixin level (and proof bookkeeping at this level).
	root := pc.mixinLengthAndCollect(currentGindex, chunks)

	pc.collectLeaf(currentGindex, root)

	return root, nil
}

// merkleizeVectorAndCollect merkleizes a slice of 32-byte leaf nodes into a subtree root, padding to a virtual size of 2^depth.
//
// Generalized indices (gindices): at layer i (0-based), nodes have gindices levelBase = subtreeGeneralizedIndex << (depth-i) and node gindex = levelBase + idx.
// Proof collection: for each layer it calls collectSibling(nodeGindex, nodeHash) and stores only those gindices registered via addTarget.
//
// Parameters:
//   - elements: leaf-level hashes (may be shorter than 2^depth; padding is applied with trie.ZeroHashes).
//   - subtreeGeneralizedIndex: gindex of the subtree root.
//   - depth: number of merkleization layers from subtree root to leaves.
//
// Returns:
//   - [32]byte: Merkle root of the subtree.
func (pc *proofCollector) merkleizeVectorAndCollect(elements [][32]byte, subtreeGeneralizedIndex uint64, depth uint64) [32]byte {
	// Return zerohash at depth
	if len(elements) == 0 {
		return trie.ZeroHashes[depth]
	}
	for i := range depth {
		layerLen := len(elements)
		oddNodeLength := layerLen%2 == 1
		if oddNodeLength {
			zerohash := trie.ZeroHashes[i]
			elements = append(elements, zerohash)
		}

		levelBaseGindex := subtreeGeneralizedIndex << (depth - i)
		for idx := range elements {
			gindex := levelBaseGindex + uint64(idx)
			pc.collectSibling(gindex, elements[idx])
			pc.collectLeaf(gindex, elements[idx])
		}

		elements = htr.VectorizedSha256(elements)
	}
	return elements[0]
}
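A worked example of the per-layer gindex arithmetic above: with subtreeGeneralizedIndex = 3 and depth = 2, layer 0 has levelBase = 3<<2 = 12, so the (up to) four leaves sit at gindices 12-15; after one hashing pass, layer 1 has levelBase = 3<<1 = 6, with nodes at gindices 6-7; the final pass produces the subtree root at gindex 3.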
|
// mixinLengthAndCollect computes the final mix-in root for list/bitlist values:
|
||||||
|
//
|
||||||
|
// root = hash(dataRoot, lengthHash)
|
||||||
|
//
|
||||||
|
// where chunks[0] is dataRoot and chunks[1] is the 32-byte length hash.
|
||||||
|
//
|
||||||
|
// Generalized indices (gindices): dataRoot is the left child (dataRootGindex = currentGindex*2) and lengthHash is the right child (lengthHashGindex = currentGindex*2+1).
|
||||||
|
// Proof collection: it calls collectSibling/collectLeaf for both child gindices; the collector stores them only if they were registered via addTarget.
|
||||||
|
//
|
||||||
|
// Parameters:
|
||||||
|
// - currentGindex: gindex of the parent node (list/bitlist root).
|
||||||
|
// - chunks: two 32-byte nodes: [dataRoot, lengthHash].
|
||||||
|
//
|
||||||
|
// Returns:
|
||||||
|
// - [32]byte: mixed-in Merkle root (or zero value on hashing error).
|
||||||
|
// - error: any error encountered during hashing.
|
||||||
|
func (pc *proofCollector) mixinLengthAndCollect(currentGindex uint64, chunks [][32]byte) [32]byte {
|
||||||
|
dataRoot, lengthHash := chunks[0], chunks[1]
|
||||||
|
dataRootGindex, lengthHashGindex := currentGindex*2, currentGindex*2+1
|
||||||
|
|
||||||
|
pc.collectSibling(dataRootGindex, dataRoot)
|
||||||
|
pc.collectSibling(lengthHashGindex, lengthHash)
|
||||||
|
|
||||||
|
pc.collectLeaf(dataRootGindex, dataRoot)
|
||||||
|
pc.collectLeaf(lengthHashGindex, lengthHash)
|
||||||
|
|
||||||
|
return ssz.MixInLength(dataRoot, lengthHash[:])
|
||||||
|
}
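
// Verification sketch (assumes ssz.MixInLength is sha256(dataRoot || lengthHash),
// which TestProofCollector_MixinLengthAndCollect below checks explicitly):
//
//	sum := sha256.Sum256(append(chunks[0][:], chunks[1][:]...))
//	// sum should equal the root returned by mixinLengthAndCollect.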
531 encoding/ssz/query/proof_collector_test.go Normal file
@@ -0,0 +1,531 @@
package query

import (
	"crypto/sha256"
	"encoding/binary"
	"reflect"
	"slices"
	"testing"

	"github.com/OffchainLabs/go-bitfield"
	"github.com/OffchainLabs/prysm/v7/beacon-chain/state/stateutil"
	"github.com/OffchainLabs/prysm/v7/consensus-types/primitives"
	ssz "github.com/OffchainLabs/prysm/v7/encoding/ssz"
	ethpb "github.com/OffchainLabs/prysm/v7/proto/prysm/v1alpha1"
	sszquerypb "github.com/OffchainLabs/prysm/v7/proto/ssz_query/testing"
	"github.com/OffchainLabs/prysm/v7/testing/require"
)

func TestProofCollector_New(t *testing.T) {
	pc := newProofCollector()

	require.NotNil(t, pc)
	require.Equal(t, 0, len(pc.requiredSiblings))
	require.Equal(t, 0, len(pc.requiredLeaves))
	require.Equal(t, 0, len(pc.siblings))
	require.Equal(t, 0, len(pc.leaves))
}

func TestProofCollector_Reset(t *testing.T) {
	pc := newProofCollector()
	pc.requiredSiblings[3] = struct{}{}
	pc.requiredLeaves[5] = struct{}{}
	pc.siblings[3] = [32]byte{1}
	pc.leaves[5] = [32]byte{2}

	pc.reset()

	require.Equal(t, 0, len(pc.requiredSiblings))
	require.Equal(t, 0, len(pc.requiredLeaves))
	require.Equal(t, 0, len(pc.siblings))
	require.Equal(t, 0, len(pc.leaves))
}

func TestProofCollector_AddTarget(t *testing.T) {
	pc := newProofCollector()
	pc.addTarget(5)

	_, hasLeaf := pc.requiredLeaves[5]
	_, hasSibling4 := pc.requiredSiblings[4]
	_, hasSibling3 := pc.requiredSiblings[3]
	_, hasSibling1 := pc.requiredSiblings[1] // GI 1 is the root

	require.Equal(t, true, hasLeaf)
	require.Equal(t, true, hasSibling4)
	require.Equal(t, true, hasSibling3)
	require.Equal(t, false, hasSibling1)
}
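
// Note (illustrative): the proof path for a target at gindex 5 climbs through
// parents 2 and 1, so the required siblings are gindex 4 (sibling of 5) and
// gindex 3 (sibling of 2). Gindex 1 is the root and has no sibling, which is
// why the assertion above expects it to be absent.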

func TestProofCollector_ToProof(t *testing.T) {
	pc := newProofCollector()
	pc.addTarget(5)

	leaf := [32]byte{9}
	sibling4 := [32]byte{4}
	sibling3 := [32]byte{3}

	pc.collectLeaf(5, leaf)
	pc.collectSibling(4, sibling4)
	pc.collectSibling(3, sibling3)

	proof, err := pc.toProof()
	require.NoError(t, err)

	require.Equal(t, 5, proof.Index)
	require.DeepEqual(t, leaf[:], proof.Leaf)
	require.Equal(t, 2, len(proof.Hashes))
	require.DeepEqual(t, sibling4[:], proof.Hashes[0])
	require.DeepEqual(t, sibling3[:], proof.Hashes[1])
}

func TestProofCollector_ToProof_NoLeaves(t *testing.T) {
	pc := newProofCollector()
	_, err := pc.toProof()
	require.NotNil(t, err)
}

func TestProofCollector_CollectLeaf(t *testing.T) {
	pc := newProofCollector()
	leaf := [32]byte{7}

	pc.collectLeaf(10, leaf)
	require.Equal(t, 0, len(pc.leaves))

	pc.addTarget(10)
	pc.collectLeaf(10, leaf)
	stored, ok := pc.leaves[10]
	require.Equal(t, true, ok)
	require.Equal(t, leaf, stored)
}

func TestProofCollector_CollectSibling(t *testing.T) {
	pc := newProofCollector()
	hash := [32]byte{5}

	pc.collectSibling(4, hash)
	require.Equal(t, 0, len(pc.siblings))

	pc.addTarget(5)
	pc.collectSibling(4, hash)
	stored, ok := pc.siblings[4]
	require.Equal(t, true, ok)
	require.Equal(t, hash, stored)
}

func TestProofCollector_Merkleize_BasicTypes(t *testing.T) {
	testCases := []struct {
		name     string
		sszType  SSZType
		value    any
		expected [32]byte
	}{
		{
			name:    "uint8",
			sszType: Uint8,
			value:   uint8(0x11),
			expected: func() [32]byte {
				var leaf [32]byte
				leaf[0] = 0x11
				return leaf
			}(),
		},
		{
			name:    "uint16",
			sszType: Uint16,
			value:   uint16(0x2211),
			expected: func() [32]byte {
				var leaf [32]byte
				binary.LittleEndian.PutUint16(leaf[:2], 0x2211)
				return leaf
			}(),
		},
		{
			name:    "uint32",
			sszType: Uint32,
			value:   uint32(0x44332211),
			expected: func() [32]byte {
				var leaf [32]byte
				binary.LittleEndian.PutUint32(leaf[:4], 0x44332211)
				return leaf
			}(),
		},
		{
			name:    "uint64",
			sszType: Uint64,
			value:   uint64(0x8877665544332211),
			expected: func() [32]byte {
				var leaf [32]byte
				binary.LittleEndian.PutUint64(leaf[:8], 0x8877665544332211)
				return leaf
			}(),
		},
		{
			name:    "bool",
			sszType: Boolean,
			value:   true,
			expected: func() [32]byte {
				var leaf [32]byte
				leaf[0] = 1
				return leaf
			}(),
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			pc := newProofCollector()
			gindex := uint64(3)
			pc.addTarget(gindex)

			leaf, err := pc.merkleizeBasicType(tc.sszType, reflect.ValueOf(tc.value), gindex)
			require.NoError(t, err)
			require.Equal(t, tc.expected, leaf)

			stored, ok := pc.leaves[gindex]
			require.Equal(t, true, ok)
			require.Equal(t, tc.expected, stored)
		})
	}
}

func TestProofCollector_Merkleize_Container(t *testing.T) {
	container := makeFixedTestContainer()

	info, err := AnalyzeObject(container)
	require.NoError(t, err)

	pc := newProofCollector()
	pc.addTarget(1)

	root, err := pc.merkleize(info, reflect.ValueOf(container), 1)
	require.NoError(t, err)

	expected, err := container.HashTreeRoot()
	require.NoError(t, err)
	require.Equal(t, expected, root)

	stored, ok := pc.leaves[1]
	require.Equal(t, true, ok)
	require.Equal(t, expected, stored)
}

func TestProofCollector_Merkleize_Vector(t *testing.T) {
	container := makeFixedTestContainer()
	info, err := AnalyzeObject(container)
	require.NoError(t, err)

	ci, err := info.ContainerInfo()
	require.NoError(t, err)
	field := ci.fields["vector_field"]

	pc := newProofCollector()
	root, err := pc.merkleizeVector(field.sszInfo, reflect.ValueOf(container.VectorField), 1)
	require.NoError(t, err)

	serialized := make([][]byte, len(container.VectorField))
	for i, v := range container.VectorField {
		buf := make([]byte, 8)
		binary.LittleEndian.PutUint64(buf, v)
		serialized[i] = buf
	}
	chunks, err := ssz.PackByChunk(serialized)
	require.NoError(t, err)
	limit, err := getChunkCount(field.sszInfo)
	require.NoError(t, err)
	expected := ssz.MerkleizeVector(chunks, limit)

	require.Equal(t, expected, root)
}
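
// Note (illustrative): ssz.PackByChunk packs the 8-byte little-endian
// serializations four per 32-byte chunk, so the 24-element uint64 vector built
// by makeFixedTestContainer occupies 24*8/32 = 6 chunks before merkleization.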

func TestProofCollector_Merkleize_List(t *testing.T) {
	list := []*sszquerypb.FixedNestedContainer{
		makeFixedNestedContainer(1),
		makeFixedNestedContainer(2),
	}
	container := makeVariableTestContainer(list, bitfield.NewBitlist(1))
	info, err := AnalyzeObject(container)
	require.NoError(t, err)

	ci, err := info.ContainerInfo()
	require.NoError(t, err)
	field := ci.fields["field_list_container"]

	pc := newProofCollector()
	root, err := pc.merkleizeList(field.sszInfo, reflect.ValueOf(list), 1)
	require.NoError(t, err)

	listInfo, err := field.sszInfo.ListInfo()
	require.NoError(t, err)
	expected, err := ssz.MerkleizeListSSZ(list, listInfo.Limit())
	require.NoError(t, err)

	require.Equal(t, expected, root)
}

func TestProofCollector_Merkleize_Bitvector(t *testing.T) {
	container := makeFixedTestContainer()
	info, err := AnalyzeObject(container)
	require.NoError(t, err)

	ci, err := info.ContainerInfo()
	require.NoError(t, err)
	field := ci.fields["bitvector64_field"]

	pc := newProofCollector()
	root, err := pc.merkleizeBitvector(field.sszInfo, reflect.ValueOf(container.Bitvector64Field), 1)
	require.NoError(t, err)

	expected, err := ssz.MerkleizeByteSliceSSZ([]byte(container.Bitvector64Field))
	require.NoError(t, err)
	require.Equal(t, expected, root)
}

func TestProofCollector_Merkleize_Bitlist(t *testing.T) {
	bitlist := bitfield.NewBitlist(16)
	bitlist.SetBitAt(3, true)
	bitlist.SetBitAt(8, true)

	container := makeVariableTestContainer(nil, bitlist)
	info, err := AnalyzeObject(container)
	require.NoError(t, err)

	ci, err := info.ContainerInfo()
	require.NoError(t, err)
	field := ci.fields["bitlist_field"]

	pc := newProofCollector()
	root, err := pc.merkleizeBitlist(field.sszInfo, reflect.ValueOf(container.BitlistField), 1)
	require.NoError(t, err)

	bitlistInfo, err := field.sszInfo.BitlistInfo()
	require.NoError(t, err)
	expected, err := ssz.BitlistRoot(bitfield.Bitlist(bitlist), bitlistInfo.Limit())
	require.NoError(t, err)
	require.Equal(t, expected, root)
}

func TestProofCollector_MerkleizeVectorBody_Basic(t *testing.T) {
	container := makeFixedTestContainer()
	info, err := AnalyzeObject(container)
	require.NoError(t, err)

	ci, err := info.ContainerInfo()
	require.NoError(t, err)
	field := ci.fields["vector_field"]
	vectorInfo, err := field.sszInfo.VectorInfo()
	require.NoError(t, err)
	length := len(container.VectorField)
	limit, err := getChunkCount(field.sszInfo)
	require.NoError(t, err)

	pc := newProofCollector()
	root, err := pc.merkleizeVectorBody(vectorInfo.element, reflect.ValueOf(container.VectorField), length, limit, 2)
	require.NoError(t, err)

	serialized := make([][]byte, len(container.VectorField))
	for i, v := range container.VectorField {
		buf := make([]byte, 8)
		binary.LittleEndian.PutUint64(buf, v)
		serialized[i] = buf
	}
	chunks, err := ssz.PackByChunk(serialized)
	require.NoError(t, err)
	expected := ssz.MerkleizeVector(chunks, limit)

	require.Equal(t, expected, root)
}

func TestProofCollector_MerkleizeVectorAndCollect(t *testing.T) {
	pc := newProofCollector()
	pc.addTarget(6)

	elements := [][32]byte{{1}, {2}}
	expected := ssz.MerkleizeVector(slices.Clone(elements), 2)
	root := pc.merkleizeVectorAndCollect(elements, 3, 1)

	storedLeaf, hasLeaf := pc.leaves[6]
	storedSibling, hasSibling := pc.siblings[7]

	require.Equal(t, true, hasLeaf)
	require.Equal(t, true, hasSibling)
	require.Equal(t, elements[0], storedLeaf)
	require.Equal(t, elements[1], storedSibling)

	require.Equal(t, expected, root)
}

func TestProofCollector_MixinLengthAndCollect(t *testing.T) {
	list := []*sszquerypb.FixedNestedContainer{
		makeFixedNestedContainer(1),
		makeFixedNestedContainer(2),
	}
	container := makeVariableTestContainer(list, bitfield.NewBitlist(1))
	info, err := AnalyzeObject(container)
	require.NoError(t, err)

	ci, err := info.ContainerInfo()
	require.NoError(t, err)
	field := ci.fields["field_list_container"]

	// Target gindex 2 (data root) - sibling at gindex 3 (length hash) should be collected
	pc := newProofCollector()
	pc.addTarget(2)
	root, err := pc.merkleizeList(field.sszInfo, reflect.ValueOf(list), 1)
	require.NoError(t, err)

	listInfo, err := field.sszInfo.ListInfo()
	require.NoError(t, err)
	expected, err := ssz.MerkleizeListSSZ(list, listInfo.Limit())
	require.NoError(t, err)
	require.Equal(t, expected, root)

	// Verify data root is collected as leaf at gindex 2
	storedLeaf, hasLeaf := pc.leaves[2]
	require.Equal(t, true, hasLeaf)

	// Verify length hash is collected as sibling at gindex 3
	storedSibling, hasSibling := pc.siblings[3]
	require.Equal(t, true, hasSibling)

	// Verify the root is hash(dataRoot || lengthHash)
	expectedBuf := append(storedLeaf[:], storedSibling[:]...)
	expectedRoot := sha256.Sum256(expectedBuf)
	require.Equal(t, expectedRoot, root)
}

func BenchmarkOptimizedValidatorRoots(b *testing.B) {
	validators := make([]*ethpb.Validator, 1000)
	for i := range validators {
		validators[i] = makeTestValidator(i)
	}

	b.ResetTimer()
	for b.Loop() {
		_, err := stateutil.OptimizedValidatorRoots(validators)
		if err != nil {
			b.Fatal(err)
		}
	}
}

func BenchmarkProofCollectorMerkleize(b *testing.B) {
	validators := make([]*ethpb.Validator, 1000)
	for i := range validators {
		validators[i] = makeTestValidator(i)
	}

	info, err := AnalyzeObject(validators[0])
	require.NoError(b, err)

	b.ResetTimer()
	for b.Loop() {
		for _, val := range validators {
			pc := newProofCollector()
			v := reflect.ValueOf(val)
			_, err := pc.merkleize(info, v, 1)
			if err != nil {
				b.Fatal(err)
			}
		}
	}
}

func makeTestValidator(i int) *ethpb.Validator {
	pubkey := make([]byte, 48)
	for j := range pubkey {
		pubkey[j] = byte(i + j)
	}

	withdrawalCredentials := make([]byte, 32)
	for j := range withdrawalCredentials {
		withdrawalCredentials[j] = byte(255 - ((i + j) % 256))
	}

	return &ethpb.Validator{
		PublicKey:                  pubkey,
		WithdrawalCredentials:      withdrawalCredentials,
		EffectiveBalance:           uint64(32000000000 + i),
		Slashed:                    i%2 == 0,
		ActivationEligibilityEpoch: primitives.Epoch(i),
		ActivationEpoch:            primitives.Epoch(i + 1),
		ExitEpoch:                  primitives.Epoch(i + 2),
		WithdrawableEpoch:          primitives.Epoch(i + 3),
	}
}

func makeFixedNestedContainer(value uint64) *sszquerypb.FixedNestedContainer {
	value2 := make([]byte, 32)
	for i := range value2 {
		value2[i] = byte(i)
	}
	return &sszquerypb.FixedNestedContainer{
		Value1: value,
		Value2: value2,
	}
}

func makeFixedTestContainer() *sszquerypb.FixedTestContainer {
	fieldBytes32 := make([]byte, 32)
	for i := range fieldBytes32 {
		fieldBytes32[i] = byte(i)
	}

	vectorField := make([]uint64, 24)
	for i := range vectorField {
		vectorField[i] = uint64(i)
	}

	rows := make([][]byte, 5)
	for i := range rows {
		row := make([]byte, 32)
		for j := range row {
			row[j] = byte(i) + byte(j)
		}
		rows[i] = row
	}

	bitvector64 := bitfield.NewBitvector64()
	bitvector64.SetBitAt(1, true)
	bitvector512 := bitfield.NewBitvector512()
	bitvector512.SetBitAt(10, true)

	trailing := make([]byte, 56)
	for i := range trailing {
		trailing[i] = byte(i)
	}

	return &sszquerypb.FixedTestContainer{
		FieldUint32:            1,
		FieldUint64:            2,
		FieldBool:              true,
		FieldBytes32:           fieldBytes32,
		Nested:                 makeFixedNestedContainer(3),
		VectorField:            vectorField,
		TwoDimensionBytesField: rows,
		Bitvector64Field:       bitvector64,
		Bitvector512Field:      bitvector512,
		TrailingField:          trailing,
	}
}

func makeVariableTestContainer(list []*sszquerypb.FixedNestedContainer, bitlist bitfield.Bitlist) *sszquerypb.VariableTestContainer {
	leading := make([]byte, 32)
	for i := range leading {
		leading[i] = byte(i)
	}
	trailing := make([]byte, 56)
	for i := range trailing {
		trailing[i] = byte(255 - i)
	}

	if bitlist == nil {
		bitlist = bitfield.NewBitlist(0)
	}

	return &sszquerypb.VariableTestContainer{
		LeadingField:       leading,
		FieldListContainer: list,
		BitlistField:       bitlist,
		TrailingField:      trailing,
	}
}

@@ -389,6 +389,7 @@ func TestHashTreeRoot(t *testing.T) {
 			require.NoError(t, err, "HashTreeRoot should not return an error")
 			expectedHashTreeRoot, err := tt.obj.HashTreeRoot()
 			require.NoError(t, err, "HashTreeRoot on original object should not return an error")
+			// Verify the Merkle tree root matches with the SSZ generated HashTreeRoot
 			require.Equal(t, expectedHashTreeRoot, hashTreeRoot, "HashTreeRoot from sszInfo should match original object's HashTreeRoot")
 		})
 	}
@@ -1,7 +1,7 @@
 diff -urN a/BUILD.bazel b/BUILD.bazel
 --- a/BUILD.bazel 1969-12-31 18:00:00.000000000 -0600
 +++ b/BUILD.bazel 2025-01-05 12:00:00.000000000 -0600
-@@ -0,0 +1,89 @@
+@@ -0,0 +1,90 @@
 +load("@io_bazel_rules_go//go:def.bzl", "go_library")
 +
 +go_library(
@@ -32,6 +32,7 @@ diff -urN a/BUILD.bazel b/BUILD.bazel
 +    ],
 +    "@io_bazel_rules_go//go/platform:darwin_amd64": [
 +        "bindings_darwin_amd64.go",
++        "wrapper_darwin_amd64.s",
 +    ],
 +    "//conditions:default": [],
 +    }),