Remove Support for 2d-list Hashers (#10290)

* add changes

* fix logic bug

* fix

* potuz's review

* Update beacon-chain/state/stateutil/eth1_root.go

Co-authored-by: Potuz <potuz@prysmaticlabs.com>

Co-authored-by: Potuz <potuz@prysmaticlabs.com>
Author: Nishant Das
Date: 2022-03-04 23:19:07 +08:00
Committed by: GitHub
Parent: 1437cb8982
Commit: 1af11885ee
14 changed files with 96 additions and 249 deletions


@@ -19,13 +19,13 @@ func BitlistRoot(hasher HashFn, bfield bitfield.Bitfield, maxCapacity uint64) ([
limit := (maxCapacity + 255) / 256
if bfield == nil || bfield.Len() == 0 {
length := make([]byte, 32)
- root, err := BitwiseMerkleize(hasher, [][]byte{}, 0, limit)
+ root, err := BitwiseMerkleize(hasher, [][32]byte{}, 0, limit)
if err != nil {
return [32]byte{}, err
}
return MixInLength(root, length), nil
}
- chunks, err := Pack([][]byte{bfield.Bytes()})
+ chunks, err := PackByChunk([][]byte{bfield.Bytes()})
if err != nil {
return [32]byte{}, err
}
@@ -47,22 +47,7 @@ func BitlistRoot(hasher HashFn, bfield bitfield.Bitfield, maxCapacity uint64) ([
// and return the root.
// Note that merkleize on a single chunk is simply that chunk, i.e. the identity
// when the number of chunks is one.
- func BitwiseMerkleize(hasher HashFn, chunks [][]byte, count, limit uint64) ([32]byte, error) {
- if count > limit {
- return [32]byte{}, errors.New("merkleizing list that is too large, over limit")
- }
- if features.Get().EnableVectorizedHTR {
- return MerkleizeList(chunks, limit), nil
- }
- hashFn := NewHasherFunc(hasher)
- leafIndexer := func(i uint64) []byte {
- return chunks[i]
- }
- return Merkleize(hashFn, count, limit, leafIndexer), nil
- }
- // BitwiseMerkleizeArrays is used when a set of 32-byte root chunks are provided.
- func BitwiseMerkleizeArrays(hasher HashFn, chunks [][32]byte, count, limit uint64) ([32]byte, error) {
+ func BitwiseMerkleize(hasher HashFn, chunks [][32]byte, count, limit uint64) ([32]byte, error) {
if count > limit {
return [32]byte{}, errors.New("merkleizing list that is too large, over limit")
}
@@ -76,51 +61,6 @@ func BitwiseMerkleizeArrays(hasher HashFn, chunks [][32]byte, count, limit uint6
return Merkleize(hashFn, count, limit, leafIndexer), nil
}
- // Pack a given byte array's final chunk with zeroes if needed.
- func Pack(serializedItems [][]byte) ([][]byte, error) {
- areAllEmpty := true
- for _, item := range serializedItems {
- if !bytes.Equal(item, []byte{}) {
- areAllEmpty = false
- break
- }
- }
- // If there are no items, we return an empty chunk.
- if len(serializedItems) == 0 || areAllEmpty {
- emptyChunk := make([]byte, bytesPerChunk)
- return [][]byte{emptyChunk}, nil
- } else if len(serializedItems[0]) == bytesPerChunk {
- // If each item has exactly BYTES_PER_CHUNK length, we return the list of serialized items.
- return serializedItems, nil
- }
- // We flatten the list in order to pack its items into byte chunks correctly.
- var orderedItems []byte
- for _, item := range serializedItems {
- orderedItems = append(orderedItems, item...)
- }
- numItems := len(orderedItems)
- var chunks [][]byte
- for i := 0; i < numItems; i += bytesPerChunk {
- j := i + bytesPerChunk
- // We create our upper bound index of the chunk, if it is greater than numItems,
- // we set it as numItems itself.
- if j > numItems {
- j = numItems
- }
- // We create chunks from the list of items based on the
- // indices determined above.
- chunks = append(chunks, orderedItems[i:j])
- }
- // Right-pad the last chunk with zero bytes if it does not
- // have length bytesPerChunk.
- lastChunk := chunks[len(chunks)-1]
- for len(lastChunk) < bytesPerChunk {
- lastChunk = append(lastChunk, 0)
- }
- chunks[len(chunks)-1] = lastChunk
- return chunks, nil
- }
// PackByChunk a given byte array's final chunk with zeroes if needed.
func PackByChunk(serializedItems [][]byte) ([][bytesPerChunk]byte, error) {
emptyChunk := [bytesPerChunk]byte{}
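With the 2d-list hasher gone, `BitwiseMerkleizeArrays` takes over the `BitwiseMerkleize` name and every caller supplies fixed-size `[][32]byte` chunks, typically produced by `PackByChunk`. A minimal sketch of the consolidated call path; the import paths and error handling here are assumptions for illustration, not part of this diff:

```go
package main

import (
	"fmt"

	"github.com/prysmaticlabs/prysm/crypto/hash"
	"github.com/prysmaticlabs/prysm/encoding/ssz"
)

func main() {
	// Pack an arbitrary serialized payload into 32-byte chunks; PackByChunk
	// right-pads the final chunk with zero bytes.
	chunks, err := ssz.PackByChunk([][]byte{{0xde, 0xad, 0xbe, 0xef}})
	if err != nil {
		panic(err)
	}

	// Merkleize the fixed-size chunks through the single remaining entry point.
	root, err := ssz.BitwiseMerkleize(hash.CustomSHA256Hasher(), chunks, uint64(len(chunks)), uint64(len(chunks)))
	if err != nil {
		panic(err)
	}
	fmt.Printf("root: %x\n", root)
}
```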


@@ -23,24 +23,9 @@ func TestBitlistRoot(t *testing.T) {
assert.Equal(t, expected, result)
}
- func TestBitwiseMerkleize(t *testing.T) {
- hasher := hash.CustomSHA256Hasher()
- chunks := [][]byte{
- {1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
- {11, 12, 13, 14, 15, 16, 17, 18, 19, 20},
- }
- count := uint64(2)
- limit := uint64(2)
- expected := [32]byte{194, 32, 213, 52, 220, 127, 18, 240, 43, 151, 19, 79, 188, 175, 142, 177, 208, 46, 96, 20, 18, 231, 208, 29, 120, 102, 122, 17, 46, 31, 155, 30}
- result, err := ssz.BitwiseMerkleize(hasher, chunks, count, limit)
- require.NoError(t, err)
- assert.Equal(t, expected, result)
- }
func TestBitwiseMerkleizeOverLimit(t *testing.T) {
hasher := hash.CustomSHA256Hasher()
- chunks := [][]byte{
+ chunks := [][32]byte{
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
{11, 12, 13, 14, 15, 16, 17, 18, 19, 20},
}
@@ -61,7 +46,19 @@ func TestBitwiseMerkleizeArrays(t *testing.T) {
limit := uint64(2)
expected := [32]byte{138, 81, 210, 194, 151, 231, 249, 241, 64, 118, 209, 58, 145, 109, 225, 89, 118, 110, 159, 220, 193, 183, 203, 124, 166, 24, 65, 26, 160, 215, 233, 219}
- result, err := ssz.BitwiseMerkleizeArrays(hasher, chunks, count, limit)
+ result, err := ssz.BitwiseMerkleize(hasher, chunks, count, limit)
require.NoError(t, err)
assert.Equal(t, expected, result)
+ chunks = [][32]byte{
+ {1, 2, 3, 4, 5, 6, 7, 8, 9, 10},
+ {11, 12, 13, 14, 15, 16, 17, 18, 19, 20},
+ }
+ count = uint64(2)
+ limit = uint64(2)
+ expected = [32]byte{194, 32, 213, 52, 220, 127, 18, 240, 43, 151, 19, 79, 188, 175, 142, 177, 208, 46, 96, 20, 18, 231, 208, 29, 120, 102, 122, 17, 46, 31, 155, 30}
+ result, err = ssz.BitwiseMerkleize(hasher, chunks, count, limit)
+ require.NoError(t, err)
+ assert.Equal(t, expected, result)
}
@@ -75,25 +72,10 @@ func TestBitwiseMerkleizeArraysOverLimit(t *testing.T) {
count := uint64(2)
limit := uint64(1)
- _, err := ssz.BitwiseMerkleizeArrays(hasher, chunks, count, limit)
+ _, err := ssz.BitwiseMerkleize(hasher, chunks, count, limit)
assert.ErrorContains(t, merkleizingListLimitError, err)
}
- func TestPack(t *testing.T) {
- byteSlice2D := [][]byte{
- {1, 2, 3, 4, 5, 6, 7, 8, 9},
- {1, 1, 2, 3, 5, 8, 13, 21, 34},
- }
- expected := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 1, 2, 3, 5, 8, 13, 21, 34, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}
- result, err := ssz.Pack(byteSlice2D)
- require.NoError(t, err)
- assert.Equal(t, len(expected), len(result[0]))
- for i, v := range expected {
- assert.DeepEqual(t, v, result[0][i])
- }
- }
func TestPackByChunk(t *testing.T) {
byteSlice2D := [][]byte{
{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 2, 5, 2, 6, 2, 7},


@@ -25,16 +25,13 @@ func Uint64Root(val uint64) [32]byte {
// a Fork struct value according to the Ethereum
// Simple Serialize specification.
func ForkRoot(fork *ethpb.Fork) ([32]byte, error) {
- fieldRoots := make([][]byte, 3)
+ fieldRoots := make([][32]byte, 3)
if fork != nil {
- prevRoot := bytesutil.ToBytes32(fork.PreviousVersion)
- fieldRoots[0] = prevRoot[:]
- currRoot := bytesutil.ToBytes32(fork.CurrentVersion)
- fieldRoots[1] = currRoot[:]
+ fieldRoots[0] = bytesutil.ToBytes32(fork.PreviousVersion)
+ fieldRoots[1] = bytesutil.ToBytes32(fork.CurrentVersion)
forkEpochBuf := make([]byte, 8)
binary.LittleEndian.PutUint64(forkEpochBuf, uint64(fork.Epoch))
- epochRoot := bytesutil.ToBytes32(forkEpochBuf)
- fieldRoots[2] = epochRoot[:]
+ fieldRoots[2] = bytesutil.ToBytes32(forkEpochBuf)
}
return BitwiseMerkleize(hash.CustomSHA256Hasher(), fieldRoots, uint64(len(fieldRoots)), uint64(len(fieldRoots)))
}
@@ -43,14 +40,12 @@ func ForkRoot(fork *ethpb.Fork) ([32]byte, error) {
// a InitWithReset struct value according to the Ethereum
// Simple Serialize specification.
func CheckpointRoot(hasher HashFn, checkpoint *ethpb.Checkpoint) ([32]byte, error) {
- fieldRoots := make([][]byte, 2)
+ fieldRoots := make([][32]byte, 2)
if checkpoint != nil {
epochBuf := make([]byte, 8)
binary.LittleEndian.PutUint64(epochBuf, uint64(checkpoint.Epoch))
- epochRoot := bytesutil.ToBytes32(epochBuf)
- fieldRoots[0] = epochRoot[:]
- ckpRoot := bytesutil.ToBytes32(checkpoint.Root)
- fieldRoots[1] = ckpRoot[:]
+ fieldRoots[0] = bytesutil.ToBytes32(epochBuf)
+ fieldRoots[1] = bytesutil.ToBytes32(checkpoint.Root)
}
return BitwiseMerkleize(hasher, fieldRoots, uint64(len(fieldRoots)), uint64(len(fieldRoots)))
}
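The `ForkRoot` and `CheckpointRoot` rewrites above show the general pattern after this change: each field becomes a `[32]byte` directly (integers are serialized little-endian first), with no `[:]` re-slicing into a `[][]byte`. A rough sketch of the same pattern for a hypothetical two-field container; the package name, helper name, and import paths are illustrative assumptions:

```go
package sketch

import (
	"encoding/binary"

	"github.com/prysmaticlabs/prysm/crypto/hash"
	"github.com/prysmaticlabs/prysm/encoding/bytesutil"
	"github.com/prysmaticlabs/prysm/encoding/ssz"
)

// containerRoot hashes a hypothetical (epoch, root) pair the same way
// CheckpointRoot does after this change.
func containerRoot(epoch uint64, root []byte) ([32]byte, error) {
	fieldRoots := make([][32]byte, 2)

	// Integers are serialized little-endian and padded to a 32-byte chunk.
	epochBuf := make([]byte, 8)
	binary.LittleEndian.PutUint64(epochBuf, epoch)
	fieldRoots[0] = bytesutil.ToBytes32(epochBuf)

	// Root-sized byte slices are copied straight into a fixed-size array.
	fieldRoots[1] = bytesutil.ToBytes32(root)

	return ssz.BitwiseMerkleize(hash.CustomSHA256Hasher(), fieldRoots, uint64(len(fieldRoots)), uint64(len(fieldRoots)))
}
```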
@@ -59,12 +54,16 @@ func CheckpointRoot(hasher HashFn, checkpoint *ethpb.Checkpoint) ([32]byte, erro
// a list of [32]byte roots according to the Ethereum Simple Serialize
// specification.
func ByteArrayRootWithLimit(roots [][]byte, limit uint64) ([32]byte, error) {
- result, err := BitwiseMerkleize(hash.CustomSHA256Hasher(), roots, uint64(len(roots)), limit)
+ newRoots := make([][32]byte, len(roots))
+ for i, r := range roots {
+ copy(newRoots[i][:], r)
+ }
+ result, err := BitwiseMerkleize(hash.CustomSHA256Hasher(), newRoots, uint64(len(newRoots)), limit)
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not compute byte array merkleization")
}
buf := new(bytes.Buffer)
- if err := binary.Write(buf, binary.LittleEndian, uint64(len(roots))); err != nil {
+ if err := binary.Write(buf, binary.LittleEndian, uint64(len(newRoots))); err != nil {
return [32]byte{}, errors.Wrap(err, "could not marshal byte array length")
}
// We need to mix in the length of the slice.
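`ByteArrayRootWithLimit` keeps the usual SSZ list shape: merkleize the `[][32]byte` roots against the type's capacity, then mix the element count into the result. A condensed sketch of that shape with a hypothetical `listRoot` helper, using the same imports as the previous sketch:

```go
// listRoot merkleizes a list of element roots up to a capacity limit and
// mixes in the actual element count, mirroring ByteArrayRootWithLimit.
func listRoot(roots [][32]byte, limit uint64) ([32]byte, error) {
	root, err := ssz.BitwiseMerkleize(hash.CustomSHA256Hasher(), roots, uint64(len(roots)), limit)
	if err != nil {
		return [32]byte{}, err
	}
	// The length is encoded little-endian into a 32-byte buffer before mixing.
	lengthBuf := make([]byte, 32)
	binary.LittleEndian.PutUint64(lengthBuf, uint64(len(roots)))
	return ssz.MixInLength(root, lengthBuf), nil
}
```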
@@ -84,7 +83,7 @@ func SlashingsRoot(slashings []uint64) ([32]byte, error) {
binary.LittleEndian.PutUint64(slashBuf, slashings[i])
slashingMarshaling[i] = slashBuf
}
- slashingChunks, err := Pack(slashingMarshaling)
+ slashingChunks, err := PackByChunk(slashingMarshaling)
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not pack slashings into chunks")
}
@@ -96,16 +95,16 @@ func SlashingsRoot(slashings []uint64) ([32]byte, error) {
// ExecutionPayload.
func TransactionsRoot(txs [][]byte) ([32]byte, error) {
hasher := hash.CustomSHA256Hasher()
- listMarshaling := make([][]byte, 0)
+ txRoots := make([][32]byte, 0)
for i := 0; i < len(txs); i++ {
rt, err := transactionRoot(txs[i])
if err != nil {
return [32]byte{}, err
}
- listMarshaling = append(listMarshaling, rt[:])
+ txRoots = append(txRoots, rt)
}
- bytesRoot, err := BitwiseMerkleize(hasher, listMarshaling, uint64(len(listMarshaling)), fieldparams.MaxTxsPerPayloadLength)
+ bytesRoot, err := BitwiseMerkleize(hasher, txRoots, uint64(len(txRoots)), fieldparams.MaxTxsPerPayloadLength)
if err != nil {
return [32]byte{}, errors.Wrap(err, "could not compute merkleization")
}
@@ -120,7 +119,7 @@ func TransactionsRoot(txs [][]byte) ([32]byte, error) {
func transactionRoot(tx []byte) ([32]byte, error) {
hasher := hash.CustomSHA256Hasher()
- chunkedRoots, err := PackChunks(tx)
+ chunkedRoots, err := PackByChunk([][]byte{tx})
if err != nil {
return [32]byte{}, err
}
@@ -138,34 +137,3 @@ func transactionRoot(tx []byte) ([32]byte, error) {
copy(bytesRootBufRoot, bytesRootBuf.Bytes())
return MixInLength(bytesRoot, bytesRootBufRoot), nil
}
- // PackChunks a given byte array into chunks. It'll pad the last chunk with zero bytes if
- // it does not have length bytes per chunk.
- func PackChunks(bytes []byte) ([][]byte, error) {
- numItems := len(bytes)
- var chunks [][]byte
- for i := 0; i < numItems; i += 32 {
- j := i + 32
- // We create our upper bound index of the chunk, if it is greater than numItems,
- // we set it as numItems itself.
- if j > numItems {
- j = numItems
- }
- // We create chunks from the list of items based on the
- // indices determined above.
- chunks = append(chunks, bytes[i:j])
- }
- if len(chunks) == 0 {
- return chunks, nil
- }
- // Right-pad the last chunk with zero bytes if it does not
- // have length bytes.
- lastChunk := chunks[len(chunks)-1]
- for len(lastChunk) < 32 {
- lastChunk = append(lastChunk, 0)
- }
- chunks[len(chunks)-1] = lastChunk
- return chunks, nil
- }
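`PackChunks` is removed outright; single-value callers such as `transactionRoot` now wrap their input in a one-element slice and call `PackByChunk`, which already returns fixed-size chunks. A short migration sketch using the ssz import from the earlier sketches; the helper name is illustrative:

```go
// blobChunks shows the PackChunks -> PackByChunk migration for one serialized value.
func blobChunks(blob []byte) ([][32]byte, error) {
	// Previously: PackChunks(blob). The replacement packs the same bytes but
	// yields [][32]byte, so no further conversion is needed before merkleizing.
	return ssz.PackByChunk([][]byte{blob})
}
```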


@@ -48,7 +48,7 @@ func FuzzForkRoot(f *testing.F) {
func FuzzPackChunks(f *testing.F) {
f.Fuzz(func(t *testing.T, b []byte) {
- if _, err := ssz.PackChunks(b); err != nil {
+ if _, err := ssz.PackByChunk([][]byte{b}); err != nil {
t.Fatal(err)
}
})


@@ -123,36 +123,36 @@ func TestTransactionsRoot(t *testing.T) {
}
}
- func TestPackChunks(t *testing.T) {
+ func TestPackByChunk_SingleList(t *testing.T) {
tests := []struct {
name string
input []byte
- want [][]byte
+ want [][32]byte
}{
{
name: "nil",
input: nil,
- want: [][]byte{},
+ want: [][32]byte{{}},
},
{
name: "empty",
input: []byte{},
- want: [][]byte{},
+ want: [][32]byte{{}},
},
{
name: "one",
input: []byte{1},
- want: [][]byte{{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
+ want: [][32]byte{{1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
},
{
name: "one, two",
input: []byte{1, 2},
- want: [][]byte{{1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
+ want: [][32]byte{{1, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
- got, err := ssz.PackChunks(tt.input)
+ got, err := ssz.PackByChunk([][]byte{tt.input})
require.NoError(t, err)
require.DeepSSZEqual(t, tt.want, got)
})


@@ -219,18 +219,3 @@ func MerkleizeVector(elements [][32]byte, length uint64) [32]byte {
}
return elements[0]
}
- // MerkleizeList uses our optimized routine to hash a 2d-list of
- // elements.
- func MerkleizeList(elements [][]byte, length uint64) [32]byte {
- depth := Depth(length)
- // Return zerohash at depth
- if len(elements) == 0 {
- return trie.ZeroHashes[depth]
- }
- newElems := make([][32]byte, len(elements))
- for i := range elements {
- copy(newElems[i][:], elements[i])
- }
- return MerkleizeVector(newElems, length)
- }
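With `MerkleizeList` deleted, there is no longer a shim that copies `[][]byte` elements into fixed arrays before vector hashing; callers that already hold `[][32]byte` leaves can use `MerkleizeVector` directly. A small sketch of the direct call, assuming `MerkleizeVector` is exported from the same `ssz` package used in the sketches above:

```go
// exampleVectorRoot hashes two 32-byte leaves without the removed
// [][]byte copying step.
func exampleVectorRoot() [32]byte {
	leaves := [][32]byte{{1}, {2}} // remaining bytes default to zero
	// The second argument is the padded vector length; here it simply
	// equals the number of leaves supplied.
	return ssz.MerkleizeVector(leaves, uint64(len(leaves)))
}
```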