Mirror of https://github.com/scroll-tech/scroll.git
Synced 2026-01-12 07:28:08 -05:00
Compare commits (1 commit)

Commit cb09024821
```diff
@@ -210,6 +210,10 @@ func NewDABatch(batch *encoding.Batch) (*DABatch, error) {
 		return nil, fmt.Errorf("too many chunks in batch")
 	}
 
+	if len(batch.Chunks) == 0 {
+		return nil, fmt.Errorf("too few chunks in batch")
+	}
+
 	// batch data hash
 	dataHash, err := computeBatchDataHash(batch.Chunks, batch.TotalL1MessagePoppedBefore)
 	if err != nil {
```
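With this hunk, NewDABatch rejects batches at both ends of the valid range: more than MaxNumChunks chunks was already an error, and a zero-chunk batch now is too, so computeBatchDataHash and the blob construction below never see an empty input. A minimal self-contained sketch of the same bounds check; the constant value and the function name `validateChunkCount` are illustrative, not the repository's actual API:

```go
package main

import (
	"errors"
	"fmt"
)

// illustrative bound; the real MaxNumChunks is defined by the codec version
const maxNumChunks = 15

// validateChunkCount mirrors the bounds check NewDABatch now performs:
// a batch must contain at least one chunk and at most maxNumChunks.
func validateChunkCount(numChunks int) error {
	if numChunks > maxNumChunks {
		return errors.New("too many chunks in batch")
	}
	if numChunks == 0 {
		return errors.New("too few chunks in batch")
	}
	return nil
}

func main() {
	for _, n := range []int{0, 1, 15, 16} {
		fmt.Printf("chunks=%d err=%v\n", n, validateChunkCount(n))
	}
}
```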
```diff
@@ -284,9 +288,6 @@ func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, *kzg4844.Point, error) {
 	// the raw (un-padded) blob payload
 	blobBytes := make([]byte, metadataLength)
 
-	// the number of chunks that contain at least one L2 transaction
-	numNonEmptyChunks := 0
-
 	// challenge digest preimage
 	// 1 hash for metadata and 1 for each chunk
 	challengePreimage := make([]byte, (1+MaxNumChunks)*32)
@@ -294,42 +295,47 @@ func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, *kzg4844.Point, error) {
 	// the challenge point z
 	var z kzg4844.Point
 
+	// the chunk data hash used for calculating the challenge preimage
+	var chunkDataHash common.Hash
+
+	// blob metadata: num_chunks
+	binary.BigEndian.PutUint16(blobBytes[0:], uint16(len(chunks)))
+
 	// encode blob metadata and L2 transactions,
 	// and simultaneously also build challenge preimage
 	for chunkID, chunk := range chunks {
 		currentChunkStartIndex := len(blobBytes)
-		hasL2Tx := false
 
 		for _, block := range chunk.Blocks {
 			for _, tx := range block.Transactions {
 				if tx.Type != types.L1MessageTxType {
-					hasL2Tx = true
 					// encode L2 txs into blob payload
 					rlpTxData, err := encoding.ConvertTxDataToRLPEncoding(tx)
 					if err != nil {
 						return nil, nil, err
 					}
 					blobBytes = append(blobBytes, rlpTxData...)
-					continue
 				}
 			}
 		}
 
+		// blob metadata: chunki_size
+		chunkSize := len(blobBytes) - currentChunkStartIndex
+		binary.BigEndian.PutUint32(blobBytes[2+4*chunkID:], uint32(chunkSize))
-		if hasL2Tx {
-			numNonEmptyChunks++
-			if chunkSize := len(blobBytes) - currentChunkStartIndex; chunkSize != 0 {
-				binary.BigEndian.PutUint32(blobBytes[2+4*chunkID:], uint32(chunkSize))
-			}
-		}
 
 		// challenge: compute chunk data hash
-		hash := crypto.Keccak256Hash(blobBytes[currentChunkStartIndex:])
-		copy(challengePreimage[32+chunkID*32:], hash[:])
+		chunkDataHash = crypto.Keccak256Hash(blobBytes[currentChunkStartIndex:])
+		copy(challengePreimage[32+chunkID*32:], chunkDataHash[:])
 	}
 
-	// blob metadata: num_chunks
-	binary.BigEndian.PutUint16(blobBytes[0:], uint16(numNonEmptyChunks))
+	// if we have fewer than MaxNumChunks chunks, the rest
+	// of the blob metadata is correctly initialized to 0,
+	// but we need to add padding to the challenge preimage
+	for chunkID := len(chunks); chunkID < MaxNumChunks; chunkID++ {
+		// use the last chunk's data hash as padding
+		copy(challengePreimage[32+chunkID*32:], chunkDataHash[:])
+	}
 
 	// challenge: compute metadata hash
 	hash := crypto.Keccak256Hash(blobBytes[0:metadataLength])
```
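Taken together, the constructBlobPayload hunks change the blob metadata from "number of non-empty chunks plus sizes of non-empty chunks" to "total number of chunks plus a size for every chunk, zero for empty ones", and they make the challenge preimage a fixed (1 + MaxNumChunks) 32-byte words by padding unused slots with the last chunk's data hash. A self-contained sketch of the resulting layout, assuming a toy maxNumChunks and raw byte strings in place of RLP-encoded transactions (the keccak256 helper stands in for go-ethereum's crypto.Keccak256Hash):

```go
package main

import (
	"encoding/binary"
	"fmt"

	"golang.org/x/crypto/sha3"
)

// illustrative bound; the real MaxNumChunks is defined by the codec version
const maxNumChunks = 15

// keccak256 stands in for crypto.Keccak256Hash from go-ethereum.
func keccak256(data []byte) [32]byte {
	h := sha3.NewLegacyKeccak256()
	h.Write(data)
	var out [32]byte
	copy(out[:], h.Sum(nil))
	return out
}

func main() {
	// toy chunk payloads standing in for RLP-encoded L2 transactions;
	// the second chunk is empty, which is now a legal, encodable case
	chunks := [][]byte{[]byte("chunk0-txs"), {}, []byte("chunk2-txs")}

	// metadata: num_chunks (2 bytes) + one 4-byte size per possible chunk
	metadataLength := 2 + 4*maxNumChunks
	blobBytes := make([]byte, metadataLength)
	binary.BigEndian.PutUint16(blobBytes[0:], uint16(len(chunks)))

	// challenge preimage: metadata hash + one data hash per possible chunk
	challengePreimage := make([]byte, (1+maxNumChunks)*32)
	var chunkDataHash [32]byte

	for chunkID, payload := range chunks {
		start := len(blobBytes)
		blobBytes = append(blobBytes, payload...)

		// the chunk size is written unconditionally, so empty chunks record 0
		binary.BigEndian.PutUint32(blobBytes[2+4*chunkID:], uint32(len(blobBytes)-start))

		chunkDataHash = keccak256(blobBytes[start:])
		copy(challengePreimage[32+chunkID*32:], chunkDataHash[:])
	}

	// pad unused preimage slots with the last chunk's data hash
	for chunkID := len(chunks); chunkID < maxNumChunks; chunkID++ {
		copy(challengePreimage[32+chunkID*32:], chunkDataHash[:])
	}

	// the first preimage word is the hash of the metadata section
	metadataHash := keccak256(blobBytes[0:metadataLength])
	copy(challengePreimage[0:], metadataHash[:])

	fmt.Printf("blob payload: %d bytes, preimage: %d bytes\n", len(blobBytes), len(challengePreimage))
}
```

Presumably the fixed-shape preimage is what lets the challenge digest be computed over a constant number of words regardless of batch size; the commit's own comments only state that the padding is needed.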
File diff suppressed because one or more lines are too long
```diff
@@ -5,7 +5,7 @@ import (
 	"runtime/debug"
 )
 
-var tag = "v4.3.74"
+var tag = "v4.3.75"
 
 var commit = func() string {
 	if info, ok := debug.ReadBuildInfo(); ok {
```
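The version bump itself is mechanical, but the surrounding context shows the commit string being resolved at runtime via debug.ReadBuildInfo. A minimal sketch of that pattern, assuming the standard vcs.revision build setting; the 8-character truncation is an assumption, not necessarily what this file does:

```go
package main

import (
	"fmt"
	"runtime/debug"
)

// vcsRevision extracts the git revision that `go build` embeds
// into the binary (Go 1.18+, built inside a git checkout).
func vcsRevision() string {
	info, ok := debug.ReadBuildInfo()
	if !ok {
		return "unknown"
	}
	for _, setting := range info.Settings {
		if setting.Key == "vcs.revision" {
			rev := setting.Value
			if len(rev) > 8 {
				rev = rev[:8] // short hash; illustrative truncation
			}
			return rev
		}
	}
	return "unknown"
}

func main() {
	fmt.Println("commit:", vcsRevision())
}
```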