Mirror of https://github.com/scroll-tech/scroll.git (synced 2026-01-11 23:18:07 -05:00)
Compare commits: fix_deploy...v4.3.75 (2 commits)

Commits in this comparison: cb09024821, 8bd4277c13
```diff
@@ -210,6 +210,10 @@ func NewDABatch(batch *encoding.Batch) (*DABatch, error) {
 		return nil, fmt.Errorf("too many chunks in batch")
 	}
 
+	if len(batch.Chunks) == 0 {
+		return nil, fmt.Errorf("too few chunks in batch")
+	}
+
 	// batch data hash
 	dataHash, err := computeBatchDataHash(batch.Chunks, batch.TotalL1MessagePoppedBefore)
 	if err != nil {
@@ -284,9 +288,6 @@ func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, *kzg4844.Poi
 	// the raw (un-padded) blob payload
 	blobBytes := make([]byte, metadataLength)
 
-	// the number of chunks that contain at least one L2 transaction
-	numNonEmptyChunks := 0
-
 	// challenge digest preimage
 	// 1 hash for metadata and 1 for each chunk
 	challengePreimage := make([]byte, (1+MaxNumChunks)*32)
@@ -294,42 +295,47 @@ func constructBlobPayload(chunks []*encoding.Chunk) (*kzg4844.Blob, *kzg4844.Poi
 	// the challenge point z
 	var z kzg4844.Point
 
+	// the chunk data hash used for calculating the challenge preimage
+	var chunkDataHash common.Hash
+
+	// blob metadata: num_chunks
+	binary.BigEndian.PutUint16(blobBytes[0:], uint16(len(chunks)))
+
 	// encode blob metadata and L2 transactions,
 	// and simultaneously also build challenge preimage
 	for chunkID, chunk := range chunks {
 		currentChunkStartIndex := len(blobBytes)
-		hasL2Tx := false
 
 		for _, block := range chunk.Blocks {
 			for _, tx := range block.Transactions {
 				if tx.Type != types.L1MessageTxType {
-					hasL2Tx = true
 					// encode L2 txs into blob payload
 					rlpTxData, err := encoding.ConvertTxDataToRLPEncoding(tx)
 					if err != nil {
 						return nil, nil, err
 					}
 					blobBytes = append(blobBytes, rlpTxData...)
-					continue
 				}
 			}
 		}
 
 		// blob metadata: chunki_size
-		chunkSize := len(blobBytes) - currentChunkStartIndex
-		binary.BigEndian.PutUint32(blobBytes[2+4*chunkID:], uint32(chunkSize))
-
-		if hasL2Tx {
-			numNonEmptyChunks++
+		if chunkSize := len(blobBytes) - currentChunkStartIndex; chunkSize != 0 {
+			binary.BigEndian.PutUint32(blobBytes[2+4*chunkID:], uint32(chunkSize))
 		}
 
 		// challenge: compute chunk data hash
-		hash := crypto.Keccak256Hash(blobBytes[currentChunkStartIndex:])
-		copy(challengePreimage[32+chunkID*32:], hash[:])
+		chunkDataHash = crypto.Keccak256Hash(blobBytes[currentChunkStartIndex:])
+		copy(challengePreimage[32+chunkID*32:], chunkDataHash[:])
 	}
 
-	// blob metadata: num_chunks
-	binary.BigEndian.PutUint16(blobBytes[0:], uint16(numNonEmptyChunks))
+	// if we have fewer than MaxNumChunks chunks, the rest
+	// of the blob metadata is correctly initialized to 0,
+	// but we need to add padding to the challenge preimage
+	for chunkID := len(chunks); chunkID < MaxNumChunks; chunkID++ {
+		// use the last chunk's data hash as padding
+		copy(challengePreimage[32+chunkID*32:], chunkDataHash[:])
+	}
 
 	// challenge: compute metadata hash
 	hash := crypto.Keccak256Hash(blobBytes[0:metadataLength])
```
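The reworked payload writes a fixed-size metadata header followed by the per-chunk RLP data: a big-endian `uint16` chunk count at offset 0 and one big-endian `uint32` size per chunk slot starting at offset `2 + 4*chunkID`. The standalone Go sketch below illustrates that layout only; `maxNumChunks`, the helper name, and the payload contents are illustrative stand-ins, not the codecv1 API.

```go
package main

import (
	"encoding/binary"
	"fmt"
)

const maxNumChunks = 15 // assumption; codecv1 defines its own MaxNumChunks

// encodeBlobMetadata is an illustrative helper, not part of codecv1: it packs
// num_chunks and per-chunk sizes the way the diff above writes them.
func encodeBlobMetadata(chunkPayloads [][]byte) ([]byte, error) {
	if len(chunkPayloads) > maxNumChunks {
		return nil, fmt.Errorf("too many chunks in batch")
	}
	if len(chunkPayloads) == 0 {
		return nil, fmt.Errorf("too few chunks in batch")
	}

	metadataLength := 2 + 4*maxNumChunks
	blobBytes := make([]byte, metadataLength)

	// blob metadata: num_chunks counts every chunk, including empty ones
	binary.BigEndian.PutUint16(blobBytes[0:], uint16(len(chunkPayloads)))

	for chunkID, payload := range chunkPayloads {
		// blob metadata: chunki_size; slots for empty or unused chunks stay zero
		if len(payload) != 0 {
			binary.BigEndian.PutUint32(blobBytes[2+4*chunkID:], uint32(len(payload)))
		}
		blobBytes = append(blobBytes, payload...)
	}
	return blobBytes, nil
}

func main() {
	blob, err := encodeBlobMetadata([][]byte{[]byte("rlp-of-l2-txs"), {}, []byte("more-rlp")})
	if err != nil {
		panic(err)
	}
	fmt.Printf("metadata: %x\n", blob[:2+4*maxNumChunks])
}
```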
File diff suppressed because one or more lines are too long
```diff
@@ -5,7 +5,7 @@ import (
 	"runtime/debug"
 )
 
-var tag = "v4.3.74"
+var tag = "v4.3.75"
 
 var commit = func() string {
 	if info, ok := debug.ReadBuildInfo(); ok {
```
```diff
@@ -92,12 +92,10 @@ contract DeployL1BridgeContracts is Script {
     }
 
     function deployMultipleVersionRollupVerifier() internal {
-        uint256[] memory _versions = new uint256[](2);
-        address[] memory _verifiers = new address[](2);
+        uint256[] memory _versions = new uint256[](1);
+        address[] memory _verifiers = new address[](1);
         _versions[0] = 0;
         _verifiers[0] = address(zkEvmVerifierV1);
-        _versions[1] = 1;
-        _verifiers[1] = address(zkEvmVerifierV1);
         rollupVerifier = new MultipleVersionRollupVerifier(L1_SCROLL_CHAIN_PROXY_ADDR, _versions, _verifiers);
 
         logAddress("L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR", address(rollupVerifier));
```
```diff
@@ -99,18 +99,6 @@ interface IScrollChain {
         bytes calldata aggrProof
     ) external;
 
-    /// @notice Finalize a committed batch on layer 1 without providing proof.
-    /// @param batchHeader The header of current batch, see the encoding in comments of `commitBatch.
-    /// @param prevStateRoot The state root of parent batch.
-    /// @param postStateRoot The state root of current batch.
-    /// @param withdrawRoot The withdraw trie root of current batch.
-    function finalizeBatch(
-        bytes calldata batchHeader,
-        bytes32 prevStateRoot,
-        bytes32 postStateRoot,
-        bytes32 withdrawRoot
-    ) external;
-
     /// @notice Finalize a committed batch (with blob) on layer 1.
     ///
     /// @dev Memory layout of `blobDataProof`:
@@ -132,18 +120,4 @@ interface IScrollChain {
         bytes calldata blobDataProof,
         bytes calldata aggrProof
     ) external;
-
-    /// @notice Finalize a committed batch (with blob) on layer 1 without providing proof.
-    /// @param batchHeader The header of current batch, see the encoding in comments of `commitBatch`.
-    /// @param prevStateRoot The state root of parent batch.
-    /// @param postStateRoot The state root of current batch.
-    /// @param withdrawRoot The withdraw trie root of current batch.
-    /// @param blobDataProof The proof for blob data.
-    function finalizeBatch4844(
-        bytes calldata batchHeader,
-        bytes32 prevStateRoot,
-        bytes32 postStateRoot,
-        bytes32 withdrawRoot,
-        bytes calldata blobDataProof
-    ) external;
 }
```
```diff
@@ -426,45 +426,6 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
         emit FinalizeBatch(_batchIndex, _batchHash, _postStateRoot, _withdrawRoot);
     }
 
-    /// @inheritdoc IScrollChain
-    function finalizeBatch(
-        bytes calldata _batchHeader,
-        bytes32 _prevStateRoot,
-        bytes32 _postStateRoot,
-        bytes32 _withdrawRoot
-    ) external override OnlyProver whenNotPaused {
-        if (_prevStateRoot == bytes32(0)) revert ErrorPreviousStateRootIsZero();
-        if (_postStateRoot == bytes32(0)) revert ErrorStateRootIsZero();
-
-        // compute batch hash and verify
-        (uint256 memPtr, bytes32 _batchHash, uint256 _batchIndex, ) = _loadBatchHeader(_batchHeader);
-
-        // verify previous state root.
-        if (finalizedStateRoots[_batchIndex - 1] != _prevStateRoot) revert ErrorIncorrectPreviousStateRoot();
-
-        // avoid duplicated verification
-        if (finalizedStateRoots[_batchIndex] != bytes32(0)) revert ErrorBatchIsAlreadyVerified();
-
-        // check and update lastFinalizedBatchIndex
-        unchecked {
-            if (lastFinalizedBatchIndex + 1 != _batchIndex) revert ErrorIncorrectBatchIndex();
-            lastFinalizedBatchIndex = _batchIndex;
-        }
-
-        // record state root and withdraw root
-        finalizedStateRoots[_batchIndex] = _postStateRoot;
-        withdrawRoots[_batchIndex] = _withdrawRoot;
-
-        // Pop finalized and non-skipped message from L1MessageQueue.
-        _popL1Messages(
-            BatchHeaderV0Codec.getSkippedBitmapPtr(memPtr),
-            BatchHeaderV0Codec.getTotalL1MessagePopped(memPtr),
-            BatchHeaderV0Codec.getL1MessagePopped(memPtr)
-        );
-
-        emit FinalizeBatch(_batchIndex, _batchHash, _postStateRoot, _withdrawRoot);
-    }
-
     /// @inheritdoc IScrollChain
     /// @dev Memory layout of `_blobDataProof`:
     /// ```text
@@ -546,59 +507,6 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
         emit FinalizeBatch(_batchIndex, _batchHash, _postStateRoot, _withdrawRoot);
     }
 
-    /// @inheritdoc IScrollChain
-    function finalizeBatch4844(
-        bytes calldata _batchHeader,
-        bytes32 _prevStateRoot,
-        bytes32 _postStateRoot,
-        bytes32 _withdrawRoot,
-        bytes calldata _blobDataProof
-    ) external override OnlyProver whenNotPaused {
-        if (_prevStateRoot == bytes32(0)) revert ErrorPreviousStateRootIsZero();
-        if (_postStateRoot == bytes32(0)) revert ErrorStateRootIsZero();
-
-        // compute batch hash and verify
-        (uint256 memPtr, bytes32 _batchHash, uint256 _batchIndex, ) = _loadBatchHeader(_batchHeader);
-        bytes32 _blobVersionedHash = BatchHeaderV1Codec.getBlobVersionedHash(memPtr);
-
-        // Calls the point evaluation precompile and verifies the output
-        {
-            (bool success, bytes memory data) = POINT_EVALUATION_PRECOMPILE_ADDR.staticcall(
-                abi.encodePacked(_blobVersionedHash, _blobDataProof)
-            );
-            // We verify that the point evaluation precompile call was successful by testing the latter 32 bytes of the
-            // response is equal to BLS_MODULUS as defined in https://eips.ethereum.org/EIPS/eip-4844#point-evaluation-precompile
-            if (!success) revert ErrorCallPointEvaluationPrecompileFailed();
-            (, uint256 result) = abi.decode(data, (uint256, uint256));
-            if (result != BLS_MODULUS) revert ErrorUnexpectedPointEvaluationPrecompileOutput();
-        }
-
-        // verify previous state root.
-        if (finalizedStateRoots[_batchIndex - 1] != _prevStateRoot) revert ErrorIncorrectPreviousStateRoot();
-
-        // avoid duplicated verification
-        if (finalizedStateRoots[_batchIndex] != bytes32(0)) revert ErrorBatchIsAlreadyVerified();
-
-        // check and update lastFinalizedBatchIndex
-        unchecked {
-            if (lastFinalizedBatchIndex + 1 != _batchIndex) revert ErrorIncorrectBatchIndex();
-            lastFinalizedBatchIndex = _batchIndex;
-        }
-
-        // record state root and withdraw root
-        finalizedStateRoots[_batchIndex] = _postStateRoot;
-        withdrawRoots[_batchIndex] = _withdrawRoot;
-
-        // Pop finalized and non-skipped message from L1MessageQueue.
-        _popL1Messages(
-            BatchHeaderV1Codec.getSkippedBitmapPtr(memPtr),
-            BatchHeaderV1Codec.getTotalL1MessagePopped(memPtr),
-            BatchHeaderV1Codec.getL1MessagePopped(memPtr)
-        );
-
-        emit FinalizeBatch(_batchIndex, _batchHash, _postStateRoot, _withdrawRoot);
-    }
-
     /************************
      * Restricted Functions *
      ************************/
```
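The removed no-proof `finalizeBatch4844` still documents how the contract talks to the EIP-4844 point evaluation precompile: it forwards `abi.encodePacked(_blobVersionedHash, _blobDataProof)` and checks that the second 32-byte word of the response equals BLS_MODULUS. A minimal Go sketch of the 192-byte precompile input follows; the assumption that `_blobDataProof` is laid out as z || y || commitment || proof comes from the precompile's input format in EIP-4844, not from code shown in this diff.

```go
package main

import "fmt"

// packPointEvaluationInput builds the 192-byte call data for the EIP-4844
// point evaluation precompile:
// versioned_hash (32) || z (32) || y (32) || commitment (48) || proof (48).
func packPointEvaluationInput(versionedHash, z, y [32]byte, commitment, proof [48]byte) []byte {
	input := make([]byte, 0, 192)
	input = append(input, versionedHash[:]...)
	input = append(input, z[:]...)
	input = append(input, y[:]...)
	input = append(input, commitment[:]...)
	input = append(input, proof[:]...)
	return input
}

func main() {
	var versionedHash, z, y [32]byte
	var commitment, proof [48]byte
	input := packPointEvaluationInput(versionedHash, z, y, commitment, proof)
	// On success the precompile returns two 32-byte words,
	// FIELD_ELEMENTS_PER_BLOB and BLS_MODULUS, which is why the contract
	// decodes (uint256, uint256) and compares the second value to BLS_MODULUS.
	fmt.Println(len(input)) // 192
}
```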
```diff
@@ -969,7 +877,7 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
             uint256 _numTransactionsInBlock = ChunkCodecV1.getNumTransactions(chunkPtr);
             if (_numTransactionsInBlock < _numL1MessagesInBlock) revert ErrorNumTxsLessThanNumL1Msgs();
             unchecked {
-                _totalTransactionsInChunk += (dataPtr - startPtr) / 32; // number of non-skipped l1 messages
+                _totalTransactionsInChunk += dataPtr - startPtr; // number of non-skipped l1 messages
                 _totalTransactionsInChunk += _numTransactionsInBlock - _numL1MessagesInBlock; // number of l2 txs
                 _totalL1MessagesPoppedInBatch += _numL1MessagesInBlock;
                 _totalL1MessagesPoppedOverall += _numL1MessagesInBlock;
```
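The comments in this hunk describe the chunk-level bookkeeping: a chunk's transaction total is the number of non-skipped L1 messages plus the number of L2 transactions in each block. A hypothetical Go sketch of that arithmetic, with types and field names invented purely for illustration:

```go
package main

import "fmt"

// blockMeta is an invented, illustrative representation of the per-block
// counts the contract reads from the chunk encoding.
type blockMeta struct {
	numTransactions     uint64 // all transactions in the block (L1 messages + L2 txs)
	numL1Messages       uint64 // L1 messages consumed by the block
	numNonSkippedL1Msgs uint64 // L1 messages actually included (not skipped)
}

// totalTransactionsInChunk mirrors the bookkeeping described by the hunk's
// comments: non-skipped L1 messages plus L2 transactions, per block.
func totalTransactionsInChunk(blocks []blockMeta) (uint64, error) {
	var total uint64
	for _, b := range blocks {
		if b.numTransactions < b.numL1Messages {
			return 0, fmt.Errorf("num txs less than num L1 msgs")
		}
		total += b.numNonSkippedL1Msgs               // number of non-skipped l1 messages
		total += b.numTransactions - b.numL1Messages // number of l2 txs
	}
	return total, nil
}

func main() {
	total, err := totalTransactionsInChunk([]blockMeta{
		{numTransactions: 5, numL1Messages: 2, numNonSkippedL1Msgs: 1},
		{numTransactions: 3, numL1Messages: 0, numNonSkippedL1Msgs: 0},
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(total) // 7: (1 + 3) from the first block, (0 + 3) from the second
}
```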