mirror of https://github.com/scroll-tech/scroll.git, synced 2026-01-12 07:28:08 -05:00

Compare commits: `curie-test ... v4.4.10` (8 commits)
| Author | SHA1 | Date |
|---|---|---|
| | e45838f3ac | |
| | acd1432d44 | |
| | 6b11e20ca6 | |
| | f12e8e3baf | |
| | ba77a74743 | |
| | 1ddfe57e5b | |
| | c48ae961a5 | |
| | 7059ad0ed4 | |
````diff
@@ -46,12 +46,11 @@ make dev_docker
 Run the tests using the following commands:

 ```bash
-export LIBSCROLL_ZSTD_VERSION=v0.0.0-rc0-ubuntu20.04
+export LIBSCROLL_ZSTD_VERSION=v0.1.0-rc0-ubuntu20.04
 export SCROLL_LIB_PATH=/scroll/lib

 sudo mkdir -p $SCROLL_LIB_PATH

 sudo wget -O $SCROLL_LIB_PATH/libzktrie.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libzktrie.so
 sudo wget -O $SCROLL_LIB_PATH/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libscroll_zstd.so

 export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$SCROLL_LIB_PATH
````
```diff
@@ -1,4 +1,4 @@
-ARG LIBSCROLL_ZSTD_VERSION=v0.0.0-rc0-ubuntu20.04
+ARG LIBSCROLL_ZSTD_VERSION=v0.1.0-rc0-ubuntu20.04
 ARG SCROLL_LIB_PATH=/scroll/lib

 # Download Go dependencies
@@ -25,7 +25,6 @@ RUN mkdir -p $SCROLL_LIB_PATH

 RUN apt-get -qq update && apt-get -qq install -y wget

 RUN wget -O $SCROLL_LIB_PATH/libzktrie.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libzktrie.so
 RUN wget -O $SCROLL_LIB_PATH/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libscroll_zstd.so

 ENV LD_LIBRARY_PATH=$SCROLL_LIB_PATH
@@ -45,7 +44,6 @@ RUN mkdir -p $SCROLL_LIB_PATH

 RUN apt-get -qq update && apt-get -qq install -y wget

 RUN wget -O $SCROLL_LIB_PATH/libzktrie.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libzktrie.so
 RUN wget -O $SCROLL_LIB_PATH/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libscroll_zstd.so

 ENV LD_LIBRARY_PATH=$SCROLL_LIB_PATH
```
```diff
@@ -1,4 +1,4 @@
-ARG LIBSCROLL_ZSTD_VERSION=v0.0.0-rc0-ubuntu20.04
+ARG LIBSCROLL_ZSTD_VERSION=v0.1.0-rc0-ubuntu20.04
 ARG SCROLL_LIB_PATH=/scroll/lib

 # Download Go dependencies
@@ -25,7 +25,6 @@ RUN mkdir -p $SCROLL_LIB_PATH

 RUN apt-get -qq update && apt-get -qq install -y wget

 RUN wget -O $SCROLL_LIB_PATH/libzktrie.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libzktrie.so
 RUN wget -O $SCROLL_LIB_PATH/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libscroll_zstd.so

 ENV LD_LIBRARY_PATH=$SCROLL_LIB_PATH
@@ -45,7 +44,6 @@ RUN mkdir -p $SCROLL_LIB_PATH

 RUN apt-get -qq update && apt-get -qq install -y wget

 RUN wget -O $SCROLL_LIB_PATH/libzktrie.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libzktrie.so
 RUN wget -O $SCROLL_LIB_PATH/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libscroll_zstd.so

 ENV LD_LIBRARY_PATH=$SCROLL_LIB_PATH
```
```diff
@@ -1,4 +1,4 @@
-ARG LIBSCROLL_ZSTD_VERSION=v0.0.0-rc0-ubuntu20.04
+ARG LIBSCROLL_ZSTD_VERSION=v0.1.0-rc0-ubuntu20.04
 ARG SCROLL_LIB_PATH=/scroll/lib

 # Download Go dependencies
@@ -25,7 +25,6 @@ RUN mkdir -p $SCROLL_LIB_PATH

 RUN apt-get -qq update && apt-get -qq install -y wget

 RUN wget -O $SCROLL_LIB_PATH/libzktrie.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libzktrie.so
 RUN wget -O $SCROLL_LIB_PATH/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libscroll_zstd.so

 ENV LD_LIBRARY_PATH=$SCROLL_LIB_PATH
@@ -45,7 +44,6 @@ RUN mkdir -p $SCROLL_LIB_PATH

 RUN apt-get -qq update && apt-get -qq install -y wget

 RUN wget -O $SCROLL_LIB_PATH/libzktrie.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libzktrie.so
 RUN wget -O $SCROLL_LIB_PATH/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libscroll_zstd.so

 ENV LD_LIBRARY_PATH=$SCROLL_LIB_PATH
```
```diff
@@ -5,7 +5,7 @@ import (
 	"runtime/debug"
 )

-var tag = "v4.4.8"
+var tag = "v4.4.10"

 var commit = func() string {
 	if info, ok := debug.ReadBuildInfo(); ok {
```
````diff
@@ -882,17 +882,6 @@ error ErrorIncorrectPreviousStateRoot()
 *Thrown when the previous state root doesn't match stored one.*

-### ErrorInvalidBatchHeaderVersion
-
-```solidity
-error ErrorInvalidBatchHeaderVersion()
-```
-
-*Thrown when the batch header version is invalid.*
-
 ### ErrorLastL1MessageSkipped

 ```solidity
````
```diff
@@ -98,19 +98,6 @@ describe("ScrollChain.blob", async () => {
     batchHeader0[25] = 1;
   });

-  it("should revert when ErrorInvalidBatchHeaderVersion", async () => {
-    const header = new Uint8Array(121);
-    header[0] = 2;
-    await expect(chain.commitBatch(1, header, ["0x"], "0x")).to.revertedWithCustomError(
-      chain,
-      "ErrorInvalidBatchHeaderVersion"
-    );
-    await expect(chain.commitBatch(2, batchHeader0, ["0x"], "0x")).to.revertedWithCustomError(
-      chain,
-      "ErrorInvalidBatchHeaderVersion"
-    );
-  });
-
   it("should revert when ErrorNoBlobFound", async () => {
     await expect(chain.commitBatch(1, batchHeader0, ["0x"], "0x")).to.revertedWithCustomError(
       chain,
```
```diff
@@ -74,9 +74,6 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
     /// @dev Thrown when the previous state root doesn't match stored one.
     error ErrorIncorrectPreviousStateRoot();

-    /// @dev Thrown when the batch header version is invalid.
-    error ErrorInvalidBatchHeaderVersion();
-
     /// @dev Thrown when the last message is skipped.
     error ErrorLastL1MessageSkipped();

@@ -119,7 +116,8 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {

     /// @dev BLS Modulus value defined in EIP-4844 and the magic value returned from a successful call to the
     /// point evaluation precompile
-    uint256 private constant BLS_MODULUS = 52435875175126190479447740508185965837690552500527637822603658699938581184513;
+    uint256 private constant BLS_MODULUS =
+        52435875175126190479447740508185965837690552500527637822603658699938581184513;

     /// @notice The chain id of the corresponding layer 2 chain.
     uint64 public immutable layer2ChainId;
@@ -310,7 +308,10 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
                 batchPtr,
                 BatchHeaderV0Codec.BATCH_HEADER_FIXED_LENGTH + _skippedL1MessageBitmap.length
             );
-        } else if (_version == 1) {
+        } else if (_version >= 1) {
+            // versions 1 and 2 both use ChunkCodecV1 and BatchHeaderV1Codec,
+            // but they use different blob encoding and different verifiers.

             bytes32 blobVersionedHash;
             (blobVersionedHash, _dataHash, _totalL1MessagesPoppedInBatch) = _commitChunksV1(
                 _totalL1MessagesPoppedOverall,
@@ -322,7 +323,7 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
                 _totalL1MessagesPoppedOverall := add(_totalL1MessagesPoppedOverall, _totalL1MessagesPoppedInBatch)
             }
             // store entries, the order matters
-            BatchHeaderV1Codec.storeVersion(batchPtr, 1);
+            BatchHeaderV1Codec.storeVersion(batchPtr, _version);
             BatchHeaderV1Codec.storeBatchIndex(batchPtr, _batchIndex);
             BatchHeaderV1Codec.storeL1MessagePopped(batchPtr, _totalL1MessagesPoppedInBatch);
             BatchHeaderV1Codec.storeTotalL1MessagePopped(batchPtr, _totalL1MessagesPoppedOverall);
@@ -335,8 +336,6 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
                 batchPtr,
                 BatchHeaderV1Codec.BATCH_HEADER_FIXED_LENGTH + _skippedL1MessageBitmap.length
             );
-        } else {
-            revert ErrorInvalidBatchHeaderVersion();
         }

         // check the length of bitmap
@@ -711,18 +710,15 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
             version := shr(248, calldataload(_batchHeader.offset))
         }

-        // version should be always 0 or 1 in current code
         uint256 _length;
         if (version == 0) {
             (batchPtr, _length) = BatchHeaderV0Codec.loadAndValidate(_batchHeader);
             _batchHash = BatchHeaderV0Codec.computeBatchHash(batchPtr, _length);
             _batchIndex = BatchHeaderV0Codec.getBatchIndex(batchPtr);
-        } else if (version == 1) {
+        } else if (version >= 1) {
             (batchPtr, _length) = BatchHeaderV1Codec.loadAndValidate(_batchHeader);
             _batchHash = BatchHeaderV1Codec.computeBatchHash(batchPtr, _length);
             _batchIndex = BatchHeaderV1Codec.getBatchIndex(batchPtr);
-        } else {
-            revert ErrorInvalidBatchHeaderVersion();
         }
         // only check when genesis is imported
         if (committedBatches[_batchIndex] != _batchHash && finalizedStateRoots[0] != bytes32(0)) {
```
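The dispatch above keys everything off the first byte of the batch header: the inline assembly `shr(248, calldataload(_batchHeader.offset))` shifts a 32-byte word right by 248 bits, leaving only the leading byte as the version. A minimal Go sketch of that convention (the 121-byte V1 header size is taken from the TypeScript test above; the codec names refer to the Solidity contract, not an actual Go API):

```go
package main

import "fmt"

// decodeHeaderVersion mirrors the Solidity dispatch: the first byte of a batch
// header is its version. Illustrative sketch, not the repository's Go codec.
func decodeHeaderVersion(header []byte) (byte, error) {
	if len(header) == 0 {
		return 0, fmt.Errorf("empty batch header")
	}
	return header[0], nil
}

func main() {
	header := make([]byte, 121) // the V1 header size used in the test above
	header[0] = 2               // a Curie (version 2) header

	v, err := decodeHeaderVersion(header)
	if err != nil {
		panic(err)
	}
	switch {
	case v == 0:
		fmt.Println("decode with BatchHeaderV0Codec")
	case v >= 1: // post-change: versions 1 and 2 share BatchHeaderV1Codec
		fmt.Println("decode with BatchHeaderV1Codec")
	}
}
```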
```diff
@@ -15,10 +15,22 @@ interface IL1GasPriceOracle {
     /// @param scalar The current fee scalar updated.
     event ScalarUpdated(uint256 scalar);

+    /// @notice Emitted when current commit fee scalar is updated.
+    /// @param scalar The current commit fee scalar updated.
+    event CommitScalarUpdated(uint256 scalar);
+
+    /// @notice Emitted when current blob fee scalar is updated.
+    /// @param scalar The current blob fee scalar updated.
+    event BlobScalarUpdated(uint256 scalar);
+
     /// @notice Emitted when current l1 base fee is updated.
     /// @param l1BaseFee The current l1 base fee updated.
     event L1BaseFeeUpdated(uint256 l1BaseFee);

+    /// @notice Emitted when current l1 blob base fee is updated.
+    /// @param l1BlobBaseFee The current l1 blob base fee updated.
+    event L1BlobBaseFeeUpdated(uint256 l1BlobBaseFee);
+
     /*************************
      * Public View Functions *
      *************************/
@@ -26,15 +38,24 @@ interface IL1GasPriceOracle {
     /// @notice Return the current l1 fee overhead.
     function overhead() external view returns (uint256);

-    /// @notice Return the current l1 fee scalar.
+    /// @notice Return the current l1 fee scalar before Curie fork.
     function scalar() external view returns (uint256);

+    /// @notice Return the current l1 commit fee scalar.
+    function commitScalar() external view returns (uint256);
+
+    /// @notice Return the current l1 blob fee scalar.
+    function blobScalar() external view returns (uint256);
+
     /// @notice Return the latest known l1 base fee.
     function l1BaseFee() external view returns (uint256);

+    /// @notice Return the latest known l1 blob base fee.
+    function l1BlobBaseFee() external view returns (uint256);
+
     /// @notice Computes the L1 portion of the fee based on the size of the rlp encoded input
     /// transaction, the current L1 base fee, and the various dynamic parameters.
-    /// @param data Unsigned fully RLP-encoded transaction to get the L1 fee for.
+    /// @param data Signed fully RLP-encoded transaction to get the L1 fee for.
     /// @return L1 fee that should be paid for the tx
     function getL1Fee(bytes memory data) external view returns (uint256);

@@ -42,7 +63,7 @@ interface IL1GasPriceOracle {
     /// represents the per-transaction gas overhead of posting the transaction and state
     /// roots to L1. Adds 74 bytes of padding to account for the fact that the input does
     /// not have a signature.
-    /// @param data Unsigned fully RLP-encoded transaction to get the L1 gas for.
+    /// @param data Signed fully RLP-encoded transaction to get the L1 gas for.
     /// @return Amount of L1 gas used to publish the transaction.
     function getL1GasUsed(bytes memory data) external view returns (uint256);

@@ -53,4 +74,9 @@ interface IL1GasPriceOracle {
     /// @notice Allows whitelisted caller to modify the l1 base fee.
     /// @param _l1BaseFee New l1 base fee.
     function setL1BaseFee(uint256 _l1BaseFee) external;
+
+    /// @notice Allows whitelisted caller to modify the l1 base fee.
+    /// @param _l1BaseFee New l1 base fee.
+    /// @param _l1BlobBaseFee New l1 blob base fee.
+    function setL1BaseFeeAndBlobBaseFee(uint256 _l1BaseFee, uint256 _l1BlobBaseFee) external;
 }
```
```diff
@@ -17,6 +17,28 @@ contract L1GasPriceOracle is OwnableBase, IL1GasPriceOracle {
     /// @param _newWhitelist The address of new whitelist contract.
     event UpdateWhitelist(address _oldWhitelist, address _newWhitelist);

+    /**********
+     * Errors *
+     **********/
+
+    /// @dev Thrown when the blob fee scalar exceed `MAX_BLOB_SCALAR`.
+    error ErrExceedMaxBlobScalar();
+
+    /// @dev Thrown when the commit fee scalar exceed `MAX_COMMIT_SCALAR`.
+    error ErrExceedMaxCommitScalar();
+
+    /// @dev Thrown when the l1 fee overhead exceed `MAX_OVERHEAD`.
+    error ErrExceedMaxOverhead();
+
+    /// @dev Thrown when the l1 fee scalar exceed `MAX_SCALAR`.
+    error ErrExceedMaxScalar();
+
+    /// @dev Thrown when the caller is not whitelisted.
+    error ErrCallerNotWhitelisted();
+
+    /// @dev Thrown when we enable Curie fork after Curie fork.
+    error ErrAlreadyInCurieFork();
+
     /*************
      * Constants *
      *************/
@@ -28,9 +50,25 @@ contract L1GasPriceOracle is OwnableBase, IL1GasPriceOracle {
     /// Computed based on current l1 block gas limit.
     uint256 private constant MAX_OVERHEAD = 30000000 / 16;

-    /// @dev The maximum possible l1 fee scale.
+    /// @dev The maximum possible l1 fee scale before Curie.
     /// x1000 should be enough.
-    uint256 private constant MAX_SCALE = 1000 * PRECISION;
+    uint256 private constant MAX_SCALAR = 1000 * PRECISION;
+
+    /// @dev The maximum possible l1 commit fee scalar after Curie.
+    /// We derive the commit scalar by
+    /// ```
+    /// commit_scalar = commit_gas_per_tx * fluctuation_multiplier * 1e9
+    /// ```
+    /// So, the value should not exceed 10^9 * 1e9 normally.
+    uint256 private constant MAX_COMMIT_SCALAR = 10 ** 9 * PRECISION;
+
+    /// @dev The maximum possible l1 blob fee scalar after Curie.
+    /// We derive the blob scalar by
+    /// ```
+    /// blob_scalar = fluctuation_multiplier / compression_ratio / blob_util_ratio * 1e9
+    /// ```
+    /// So, the value should not exceed 10^9 * 1e9 normally.
+    uint256 private constant MAX_BLOB_SCALAR = 10 ** 9 * PRECISION;

     /*************
      * Variables *
@@ -48,6 +86,27 @@ contract L1GasPriceOracle is OwnableBase, IL1GasPriceOracle {
     /// @notice The address of whitelist contract.
     IWhitelist public whitelist;

+    /// @inheritdoc IL1GasPriceOracle
+    uint256 public override l1BlobBaseFee;
+
+    /// @inheritdoc IL1GasPriceOracle
+    uint256 public override commitScalar;
+
+    /// @inheritdoc IL1GasPriceOracle
+    uint256 public override blobScalar;
+
+    /// @notice Indicates whether the network has gone through the Curie upgrade.
+    bool public isCurie;
+
+    /*************
+     * Modifiers *
+     *************/
+
+    modifier onlyWhitelistedSender() {
+        if (!whitelist.isSenderAllowed(msg.sender)) revert ErrCallerNotWhitelisted();
+        _;
+    }
+
     /***************
      * Constructor *
      ***************/
@@ -62,15 +121,116 @@ contract L1GasPriceOracle is OwnableBase, IL1GasPriceOracle {

     /// @inheritdoc IL1GasPriceOracle
     function getL1Fee(bytes memory _data) external view override returns (uint256) {
-        uint256 _l1GasUsed = getL1GasUsed(_data);
-        uint256 _l1Fee = _l1GasUsed * l1BaseFee;
-        return (_l1Fee * scalar) / PRECISION;
+        if (isCurie) {
+            return _getL1FeeCurie(_data);
+        } else {
+            return _getL1FeeBeforeCurie(_data);
+        }
     }

     /// @inheritdoc IL1GasPriceOracle
+    /// @dev The `_data` is the RLP-encoded transaction with signature. And we also reserve additional
+    /// 4 bytes in the non-zero bytes to store the number of bytes in the RLP-encoded transaction.
     function getL1GasUsed(bytes memory _data) public view override returns (uint256) {
+        if (isCurie) {
+            // It is near zero since we put all transactions to blob.
+            return 0;
+        } else {
+            return _getL1GasUsedBeforeCurie(_data);
+        }
     }

+    /*****************************
+     * Public Mutating Functions *
+     *****************************/
+
+    /// @inheritdoc IL1GasPriceOracle
+    function setL1BaseFee(uint256 _l1BaseFee) external override onlyWhitelistedSender {
+        l1BaseFee = _l1BaseFee;
+
+        emit L1BaseFeeUpdated(_l1BaseFee);
+    }
+
+    /// @inheritdoc IL1GasPriceOracle
+    function setL1BaseFeeAndBlobBaseFee(
+        uint256 _l1BaseFee,
+        uint256 _l1BlobBaseFee
+    ) external override onlyWhitelistedSender {
+        l1BaseFee = _l1BaseFee;
+        l1BlobBaseFee = _l1BlobBaseFee;
+
+        emit L1BaseFeeUpdated(_l1BaseFee);
+        emit L1BlobBaseFeeUpdated(_l1BlobBaseFee);
+    }
+
+    /************************
+     * Restricted Functions *
+     ************************/
+
+    /// @notice Allows the owner to modify the overhead.
+    /// @param _overhead New overhead
+    function setOverhead(uint256 _overhead) external onlyOwner {
+        if (_overhead > MAX_OVERHEAD) revert ErrExceedMaxOverhead();
+
+        overhead = _overhead;
+        emit OverheadUpdated(_overhead);
+    }
+
+    /// Allows the owner to modify the scalar.
+    /// @param _scalar New scalar
+    function setScalar(uint256 _scalar) external onlyOwner {
+        if (_scalar > MAX_SCALAR) revert ErrExceedMaxScalar();
+
+        scalar = _scalar;
+        emit ScalarUpdated(_scalar);
+    }
+
+    /// Allows the owner to modify the commit scalar.
+    /// @param _scalar New scalar
+    function setCommitScalar(uint256 _scalar) external onlyOwner {
+        if (_scalar > MAX_COMMIT_SCALAR) revert ErrExceedMaxCommitScalar();
+
+        commitScalar = _scalar;
+        emit CommitScalarUpdated(_scalar);
+    }
+
+    /// Allows the owner to modify the blob scalar.
+    /// @param _scalar New scalar
+    function setBlobScalar(uint256 _scalar) external onlyOwner {
+        if (_scalar > MAX_BLOB_SCALAR) revert ErrExceedMaxBlobScalar();
+
+        blobScalar = _scalar;
+        emit BlobScalarUpdated(_scalar);
+    }
+
+    /// @notice Update whitelist contract.
+    /// @dev This function can only called by contract owner.
+    /// @param _newWhitelist The address of new whitelist contract.
+    function updateWhitelist(address _newWhitelist) external onlyOwner {
+        address _oldWhitelist = address(whitelist);
+
+        whitelist = IWhitelist(_newWhitelist);
+        emit UpdateWhitelist(_oldWhitelist, _newWhitelist);
+    }
+
+    /// @notice Enable the Curie fork (callable by contract owner).
+    ///
+    /// @dev Since this is a predeploy contract, we will directly set the slot while hard fork
+    /// to avoid external owner operations.
+    /// The reason that we keep this function is for easy unit testing.
+    function enableCurie() external onlyOwner {
+        if (isCurie) revert ErrAlreadyInCurieFork();
+        isCurie = true;
+    }
+
+    /**********************
+     * Internal Functions *
+     **********************/
+
+    /// @dev Internal function to computes the amount of L1 gas used for a transaction before Curie fork.
+    /// The `_data` is the RLP-encoded transaction with signature. And we also reserve additional
+    /// 4 bytes in the non-zero bytes to store the number of bytes in the RLP-encoded transaction.
+    /// @param _data Signed fully RLP-encoded transaction to get the L1 gas for.
+    /// @return Amount of L1 gas used to publish the transaction.
+    function _getL1GasUsedBeforeCurie(bytes memory _data) private view returns (uint256) {
         uint256 _total = 0;
         uint256 _length = _data.length;
         unchecked {
@@ -85,48 +245,22 @@ contract L1GasPriceOracle is OwnableBase, IL1GasPriceOracle {
         }
     }

-    /*****************************
-     * Public Mutating Functions *
-     *****************************/
-
-    /// @inheritdoc IL1GasPriceOracle
-    function setL1BaseFee(uint256 _l1BaseFee) external override {
-        require(whitelist.isSenderAllowed(msg.sender), "Not whitelisted sender");
-
-        l1BaseFee = _l1BaseFee;
-
-        emit L1BaseFeeUpdated(_l1BaseFee);
+    /// @dev Internal function to compute the L1 portion of the fee based on the size of the rlp encoded input
+    /// transaction, the current L1 base fee, and the various dynamic parameters, before Curie fork.
+    /// @param _data Signed fully RLP-encoded transaction to get the L1 fee for.
+    /// @return L1 fee that should be paid for the tx
+    function _getL1FeeBeforeCurie(bytes memory _data) private view returns (uint256) {
+        uint256 _l1GasUsed = _getL1GasUsedBeforeCurie(_data);
+        uint256 _l1Fee = _l1GasUsed * l1BaseFee;
+        return (_l1Fee * scalar) / PRECISION;
     }

-    /************************
-     * Restricted Functions *
-     ************************/
-
-    /// @notice Allows the owner to modify the overhead.
-    /// @param _overhead New overhead
-    function setOverhead(uint256 _overhead) external onlyOwner {
-        require(_overhead <= MAX_OVERHEAD, "exceed maximum overhead");
-
-        overhead = _overhead;
-        emit OverheadUpdated(_overhead);
-    }
-
-    /// Allows the owner to modify the scalar.
-    /// @param _scalar New scalar
-    function setScalar(uint256 _scalar) external onlyOwner {
-        require(_scalar <= MAX_SCALE, "exceed maximum scale");
-
-        scalar = _scalar;
-        emit ScalarUpdated(_scalar);
-    }
-
-    /// @notice Update whitelist contract.
-    /// @dev This function can only called by contract owner.
-    /// @param _newWhitelist The address of new whitelist contract.
-    function updateWhitelist(address _newWhitelist) external onlyOwner {
-        address _oldWhitelist = address(whitelist);
-
-        whitelist = IWhitelist(_newWhitelist);
-        emit UpdateWhitelist(_oldWhitelist, _newWhitelist);
+    /// @dev Internal function to compute the L1 portion of the fee based on the size of the rlp encoded input
+    /// transaction, the current L1 base fee, and the various dynamic parameters, after Curie fork.
+    /// @param _data Signed fully RLP-encoded transaction to get the L1 fee for.
+    /// @return L1 fee that should be paid for the tx
+    function _getL1FeeCurie(bytes memory _data) private view returns (uint256) {
+        // We have bounded the value of `commitScalar` and `blobScalar`, the whole expression won't overflow.
+        return (commitScalar * l1BaseFee + blobScalar * _data.length * l1BlobBaseFee) / PRECISION;
     }
 }
```
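After Curie, the oracle stops counting zero and non-zero calldata bytes entirely: `getL1GasUsed` returns 0 and the fee becomes a linear function of the transaction's RLP length, `(commitScalar * l1BaseFee + blobScalar * len(data) * l1BlobBaseFee) / PRECISION`. A minimal Go sketch of the `_getL1FeeCurie` arithmetic; the formula is taken from the contract above, but the parameter values are purely illustrative:

```go
package main

import (
	"fmt"
	"math/big"
)

// precision matches the contract's PRECISION constant (1e9).
var precision = big.NewInt(1e9)

// curieL1Fee reproduces the post-Curie formula from _getL1FeeCurie:
// (commitScalar * l1BaseFee + blobScalar * len(data) * l1BlobBaseFee) / PRECISION.
// Sketch only; the canonical computation lives in the Solidity contract.
func curieL1Fee(commitScalar, blobScalar, l1BaseFee, l1BlobBaseFee *big.Int, dataLen int64) *big.Int {
	commitPart := new(big.Int).Mul(commitScalar, l1BaseFee)
	blobPart := new(big.Int).Mul(blobScalar, big.NewInt(dataLen))
	blobPart.Mul(blobPart, l1BlobBaseFee)
	fee := new(big.Int).Add(commitPart, blobPart)
	return fee.Div(fee, precision)
}

func main() {
	// Illustrative values, not mainnet parameters.
	fee := curieL1Fee(
		big.NewInt(230_000_000_000), // commitScalar
		big.NewInt(417_000_000),     // blobScalar
		big.NewInt(30_000_000_000),  // l1BaseFee: 30 gwei
		big.NewInt(1_000_000_000),   // l1BlobBaseFee: 1 gwei
		300,                         // RLP-encoded signed tx length in bytes
	)
	fmt.Println("L1 fee (wei):", fee)
}
```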
```diff
@@ -4,14 +4,15 @@ pragma solidity =0.8.24;

 import {DSTestPlus} from "solmate/test/utils/DSTestPlus.sol";

-import {L1BlockContainer} from "../L2/predeploys/L1BlockContainer.sol";
 import {L1GasPriceOracle} from "../L2/predeploys/L1GasPriceOracle.sol";
 import {Whitelist} from "../L2/predeploys/Whitelist.sol";

 contract L1GasPriceOracleTest is DSTestPlus {
     uint256 private constant PRECISION = 1e9;
     uint256 private constant MAX_OVERHEAD = 30000000 / 16;
-    uint256 private constant MAX_SCALE = 1000 * PRECISION;
+    uint256 private constant MAX_SCALAR = 1000 * PRECISION;
+    uint256 private constant MAX_COMMIT_SCALAR = 10 ** 9 * PRECISION;
+    uint256 private constant MAX_BLOB_SCALAR = 10 ** 9 * PRECISION;

     L1GasPriceOracle private oracle;
     Whitelist private whitelist;
@@ -36,7 +37,7 @@ contract L1GasPriceOracleTest is DSTestPlus {
         hevm.stopPrank();

         // overhead is too large
-        hevm.expectRevert("exceed maximum overhead");
+        hevm.expectRevert(L1GasPriceOracle.ErrExceedMaxOverhead.selector);
         oracle.setOverhead(MAX_OVERHEAD + 1);

         // call by owner, should succeed
@@ -46,7 +47,7 @@ contract L1GasPriceOracleTest is DSTestPlus {
     }

     function testSetScalar(uint256 _scalar) external {
-        _scalar = bound(_scalar, 0, MAX_SCALE);
+        _scalar = bound(_scalar, 0, MAX_SCALAR);

         // call by non-owner, should revert
         hevm.startPrank(address(1));
@@ -55,8 +56,8 @@ contract L1GasPriceOracleTest is DSTestPlus {
         hevm.stopPrank();

         // scale is too large
-        hevm.expectRevert("exceed maximum scale");
-        oracle.setScalar(MAX_SCALE + 1);
+        hevm.expectRevert(L1GasPriceOracle.ErrExceedMaxScalar.selector);
+        oracle.setScalar(MAX_SCALAR + 1);

         // call by owner, should succeed
         assertEq(oracle.scalar(), 0);
@@ -64,6 +65,44 @@ contract L1GasPriceOracleTest is DSTestPlus {
         assertEq(oracle.scalar(), _scalar);
     }

+    function testSetCommitScalar(uint256 _scalar) external {
+        _scalar = bound(_scalar, 0, MAX_COMMIT_SCALAR);
+
+        // call by non-owner, should revert
+        hevm.startPrank(address(1));
+        hevm.expectRevert("caller is not the owner");
+        oracle.setCommitScalar(_scalar);
+        hevm.stopPrank();
+
+        // scale is too large
+        hevm.expectRevert(L1GasPriceOracle.ErrExceedMaxCommitScalar.selector);
+        oracle.setCommitScalar(MAX_COMMIT_SCALAR + 1);
+
+        // call by owner, should succeed
+        assertEq(oracle.commitScalar(), 0);
+        oracle.setCommitScalar(_scalar);
+        assertEq(oracle.commitScalar(), _scalar);
+    }
+
+    function testSetBlobScalar(uint256 _scalar) external {
+        _scalar = bound(_scalar, 0, MAX_BLOB_SCALAR);
+
+        // call by non-owner, should revert
+        hevm.startPrank(address(1));
+        hevm.expectRevert("caller is not the owner");
+        oracle.setBlobScalar(_scalar);
+        hevm.stopPrank();
+
+        // scale is too large
+        hevm.expectRevert(L1GasPriceOracle.ErrExceedMaxBlobScalar.selector);
+        oracle.setBlobScalar(MAX_COMMIT_SCALAR + 1);
+
+        // call by owner, should succeed
+        assertEq(oracle.blobScalar(), 0);
+        oracle.setBlobScalar(_scalar);
+        assertEq(oracle.blobScalar(), _scalar);
+    }
+
     function testUpdateWhitelist(address _newWhitelist) external {
         hevm.assume(_newWhitelist != address(whitelist));

@@ -79,12 +118,29 @@ contract L1GasPriceOracleTest is DSTestPlus {
         assertEq(address(oracle.whitelist()), _newWhitelist);
     }

+    function testEnableCurie() external {
+        // call by non-owner, should revert
+        hevm.startPrank(address(1));
+        hevm.expectRevert("caller is not the owner");
+        oracle.enableCurie();
+        hevm.stopPrank();
+
+        // call by owner, should succeed
+        assertBoolEq(oracle.isCurie(), false);
+        oracle.enableCurie();
+        assertBoolEq(oracle.isCurie(), true);
+
+        // enable twice, should revert
+        hevm.expectRevert(L1GasPriceOracle.ErrAlreadyInCurieFork.selector);
+        oracle.enableCurie();
+    }
+
     function testSetL1BaseFee(uint256 _baseFee) external {
         _baseFee = bound(_baseFee, 0, 1e9 * 20000); // max 20k gwei

         // call by non-owner, should revert
         hevm.startPrank(address(1));
-        hevm.expectRevert("Not whitelisted sender");
+        hevm.expectRevert(L1GasPriceOracle.ErrCallerNotWhitelisted.selector);
         oracle.setL1BaseFee(_baseFee);
         hevm.stopPrank();

@@ -94,7 +150,25 @@ contract L1GasPriceOracleTest is DSTestPlus {
         assertEq(oracle.l1BaseFee(), _baseFee);
     }

-    function testGetL1GasUsed(uint256 _overhead, bytes memory _data) external {
+    function testSetL1BaseFeeAndBlobBaseFee(uint256 _baseFee, uint256 _blobBaseFee) external {
+        _baseFee = bound(_baseFee, 0, 1e9 * 20000); // max 20k gwei
+        _blobBaseFee = bound(_blobBaseFee, 0, 1e9 * 20000); // max 20k gwei
+
+        // call by non-owner, should revert
+        hevm.startPrank(address(1));
+        hevm.expectRevert(L1GasPriceOracle.ErrCallerNotWhitelisted.selector);
+        oracle.setL1BaseFeeAndBlobBaseFee(_baseFee, _blobBaseFee);
+        hevm.stopPrank();
+
+        // call by owner, should succeed
+        assertEq(oracle.l1BaseFee(), 0);
+        assertEq(oracle.l1BlobBaseFee(), 0);
+        oracle.setL1BaseFeeAndBlobBaseFee(_baseFee, _blobBaseFee);
+        assertEq(oracle.l1BaseFee(), _baseFee);
+        assertEq(oracle.l1BlobBaseFee(), _blobBaseFee);
+    }
+
+    function testGetL1GasUsedBeforeCurie(uint256 _overhead, bytes memory _data) external {
         _overhead = bound(_overhead, 0, MAX_OVERHEAD);

         oracle.setOverhead(_overhead);
@@ -108,14 +182,14 @@ contract L1GasPriceOracleTest is DSTestPlus {
         assertEq(oracle.getL1GasUsed(_data), _gasUsed);
     }

-    function testGetL1Fee(
+    function testGetL1FeeBeforeCurie(
         uint256 _baseFee,
         uint256 _overhead,
         uint256 _scalar,
         bytes memory _data
     ) external {
         _overhead = bound(_overhead, 0, MAX_OVERHEAD);
-        _scalar = bound(_scalar, 0, MAX_SCALE);
+        _scalar = bound(_scalar, 0, MAX_SCALAR);
         _baseFee = bound(_baseFee, 0, 1e9 * 20000); // max 20k gwei

         oracle.setOverhead(_overhead);
@@ -130,4 +204,32 @@ contract L1GasPriceOracleTest is DSTestPlus {

         assertEq(oracle.getL1Fee(_data), (_gasUsed * _baseFee * _scalar) / PRECISION);
     }

+    function testGetL1GasUsedCurie(bytes memory _data) external {
+        oracle.enableCurie();
+        assertEq(oracle.getL1GasUsed(_data), 0);
+    }
+
+    function testGetL1FeeCurie(
+        uint256 _baseFee,
+        uint256 _blobBaseFee,
+        uint256 _commitScalar,
+        uint256 _blobScalar,
+        bytes memory _data
+    ) external {
+        _baseFee = bound(_baseFee, 0, 1e9 * 20000); // max 20k gwei
+        _blobBaseFee = bound(_blobBaseFee, 0, 1e9 * 20000); // max 20k gwei
+        _commitScalar = bound(_commitScalar, 0, MAX_COMMIT_SCALAR);
+        _blobScalar = bound(_blobScalar, 0, MAX_BLOB_SCALAR);
+
+        oracle.enableCurie();
+        oracle.setCommitScalar(_commitScalar);
+        oracle.setBlobScalar(_blobScalar);
+        oracle.setL1BaseFeeAndBlobBaseFee(_baseFee, _blobBaseFee);
+
+        assertEq(
+            oracle.getL1Fee(_data),
+            (_commitScalar * _baseFee + _blobScalar * _blobBaseFee * _data.length) / PRECISION
+        );
+    }
 }
```
```diff
@@ -89,12 +89,6 @@ contract ScrollChainTest is DSTestPlus {
         rollup.commitBatch(0, batchHeader0, new bytes[](0), new bytes(0));
         hevm.stopPrank();

-        // invalid version, revert
-        hevm.startPrank(address(0));
-        hevm.expectRevert(ScrollChain.ErrorInvalidBatchHeaderVersion.selector);
-        rollup.commitBatch(2, batchHeader0, new bytes[](1), new bytes(0));
-        hevm.stopPrank();
-
         // batch header length too small, revert
         hevm.startPrank(address(0));
         hevm.expectRevert(BatchHeaderV0Codec.ErrorBatchHeaderLengthTooSmall.selector);
```
```diff
@@ -42,6 +42,7 @@ func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *go
 	bp := &BatchProverTask{
 		BaseProverTask: BaseProverTask{
 			vkMap:        vkMap,
+			reverseVkMap: reverseMap(vkMap),
 			db:           db,
 			cfg:          cfg,
 			nameForkMap:  nameForkMap,
@@ -64,48 +65,31 @@ func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *go
 	return bp
 }

-// Assign load and assign batch tasks
-func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
-	taskCtx, err := bp.checkParameter(ctx, getTaskParameter)
-	if err != nil || taskCtx == nil {
-		return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
-	}
+type chunkIndexRange struct {
+	start uint64
+	end   uint64
+}

-	hardForkNumber, err := bp.getHardForkNumberByName(taskCtx.HardForkName)
-	if err != nil {
-		log.Error("batch assign failure because of the hard fork name don't exist", "fork name", taskCtx.HardForkName)
-		return nil, err
-	}
+func (r *chunkIndexRange) merge(o chunkIndexRange) *chunkIndexRange {
+	var start, end = r.start, r.end
+	if o.start < r.start {
+		start = o.start
+	}
+	if o.end > r.end {
+		end = o.end
+	}
+	return &chunkIndexRange{start, end}
+}

-	// if the hard fork number set, rollup relayer must generate the chunk from hard fork number,
-	// so the hard fork chunk's start_block_number must be ForkBlockNumber
-	var startChunkIndex uint64 = 0
-	var endChunkIndex uint64 = math.MaxInt64
-	fromBlockNum, toBlockNum := forks.BlockRange(hardForkNumber, bp.forkHeights)
-	if fromBlockNum != 0 {
-		startChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx.Copy(), fromBlockNum)
-		if chunkErr != nil {
-			log.Error("failed to get fork start chunk index", "forkName", taskCtx.HardForkName, "fromBlockNumber", fromBlockNum, "err", chunkErr)
-			return nil, ErrCoordinatorInternalFailure
-		}
-		if startChunk == nil {
-			return nil, nil
-		}
-		startChunkIndex = startChunk.Index
-	}
-	if toBlockNum != math.MaxInt64 {
-		toChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx.Copy(), toBlockNum)
-		if chunkErr != nil {
-			log.Error("failed to get fork end chunk index", "forkName", taskCtx.HardForkName, "toBlockNumber", toBlockNum, "err", chunkErr)
-			return nil, ErrCoordinatorInternalFailure
-		}
-		if toChunk != nil {
-			// toChunk being nil only indicates that we haven't yet reached the fork boundary
-			// don't need change the endChunkIndex of math.MaxInt64
-			endChunkIndex = toChunk.Index
-		}
-	}
+func (r *chunkIndexRange) contains(start, end uint64) bool {
+	return r.start <= start && r.end > end
+}

+type getHardForkNameByBatchFunc func(*orm.Batch) (string, error)
+
+func (bp *BatchProverTask) doAssignTaskWithinChunkRange(ctx *gin.Context, taskCtx *proverTaskContext,
+	chunkRange *chunkIndexRange, getTaskParameter *coordinatorType.GetTaskParameter, getHardForkName getHardForkNameByBatchFunc) (*coordinatorType.GetTaskSchema, error) {
+	startChunkIndex, endChunkIndex := chunkRange.start, chunkRange.end
 	maxActiveAttempts := bp.cfg.ProverManager.ProversPerSession
 	maxTotalAttempts := bp.cfg.ProverManager.SessionAttempts
 	var batchTask *orm.Batch
@@ -154,13 +138,25 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
 	}

 	log.Info("start batch proof generation session", "id", batchTask.Hash, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName)
+	var (
+		proverVersion = taskCtx.ProverVersion
+		hardForkName  = taskCtx.HardForkName
+	)
+	var err error
+	if getHardForkName != nil {
+		hardForkName, err = getHardForkName(batchTask)
+		if err != nil {
+			log.Error("failed to get hard fork name by batch", "error", err.Error())
+			return nil, ErrCoordinatorInternalFailure
+		}
+	}

 	proverTask := orm.ProverTask{
 		TaskID:          batchTask.Hash,
 		ProverPublicKey: taskCtx.PublicKey,
 		TaskType:        int16(message.ProofTypeBatch),
 		ProverName:      taskCtx.ProverName,
-		ProverVersion:   taskCtx.ProverVersion,
+		ProverVersion:   proverVersion,
 		ProvingStatus:   int16(types.ProverAssigned),
 		FailureType:     int16(types.ProverTaskFailureTypeUndefined),
 		// here why need use UTC time. see scroll/common/databased/db.go
@@ -181,7 +177,7 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
 		return nil, ErrCoordinatorInternalFailure
 	}

-	bp.batchTaskGetTaskTotal.WithLabelValues(taskCtx.HardForkName).Inc()
+	bp.batchTaskGetTaskTotal.WithLabelValues(hardForkName).Inc()
 	bp.batchTaskGetTaskProver.With(prometheus.Labels{
 		coordinatorType.LabelProverName:      proverTask.ProverName,
 		coordinatorType.LabelProverPublicKey: proverTask.ProverPublicKey,
@@ -191,6 +187,107 @@ func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
 	return taskMsg, nil
 }

+func (bp *BatchProverTask) getChunkRangeByName(ctx *gin.Context, hardForkName string) (*chunkIndexRange, error) {
+	hardForkNumber, err := bp.getHardForkNumberByName(hardForkName)
+	if err != nil {
+		log.Error("batch assign failure because of the hard fork name don't exist", "fork name", hardForkName)
+		return nil, err
+	}
+
+	// if the hard fork number set, rollup relayer must generate the chunk from hard fork number,
+	// so the hard fork chunk's start_block_number must be ForkBlockNumber
+	var startChunkIndex uint64 = 0
+	var endChunkIndex uint64 = math.MaxInt64
+	fromBlockNum, toBlockNum := forks.BlockRange(hardForkNumber, bp.forkHeights)
+	if fromBlockNum != 0 {
+		startChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx.Copy(), fromBlockNum)
+		if chunkErr != nil {
+			log.Error("failed to get fork start chunk index", "forkName", hardForkName, "fromBlockNumber", fromBlockNum, "err", chunkErr)
+			return nil, ErrCoordinatorInternalFailure
+		}
+		if startChunk == nil {
+			return nil, nil
+		}
+		startChunkIndex = startChunk.Index
+	}
+	if toBlockNum != math.MaxInt64 {
+		toChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx.Copy(), toBlockNum)
+		if chunkErr != nil {
+			log.Error("failed to get fork end chunk index", "forkName", hardForkName, "toBlockNumber", toBlockNum, "err", chunkErr)
+			return nil, ErrCoordinatorInternalFailure
+		}
+		if toChunk != nil {
+			// toChunk being nil only indicates that we haven't yet reached the fork boundary
+			// don't need change the endChunkIndex of math.MaxInt64
+			endChunkIndex = toChunk.Index
+		}
+	}
+	return &chunkIndexRange{startChunkIndex, endChunkIndex}, nil
+}
+
+func (bp *BatchProverTask) assignWithSingleCircuit(ctx *gin.Context, taskCtx *proverTaskContext, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
+	chunkRange, err := bp.getChunkRangeByName(ctx, taskCtx.HardForkName)
+	if err != nil {
+		return nil, err
+	}
+	if chunkRange == nil {
+		return nil, nil
+	}
+	return bp.doAssignTaskWithinChunkRange(ctx, taskCtx, chunkRange, getTaskParameter, nil)
+}
+
+func (bp *BatchProverTask) assignWithTwoCircuits(ctx *gin.Context, taskCtx *proverTaskContext, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
+	var (
+		hardForkNames [2]string
+		chunkRanges   [2]*chunkIndexRange
+		err           error
+	)
+	for i := 0; i < 2; i++ {
+		hardForkNames[i] = bp.reverseVkMap[getTaskParameter.VKs[i]]
+		chunkRanges[i], err = bp.getChunkRangeByName(ctx, hardForkNames[i])
+		if err != nil {
+			return nil, err
+		}
+		if chunkRanges[i] == nil {
+			return nil, nil
+		}
+	}
+	chunkRange := chunkRanges[0].merge(*chunkRanges[1])
+	var hardForkName string
+	getHardForkName := func(batch *orm.Batch) (string, error) {
+		for i := 0; i < 2; i++ {
+			if chunkRanges[i].contains(batch.StartChunkIndex, batch.EndChunkIndex) {
+				hardForkName = hardForkNames[i]
+				break
+			}
+		}
+		if hardForkName == "" {
+			log.Warn("get batch not belongs to any hard fork name", "batch id", batch.Index)
+			return "", fmt.Errorf("get batch not belongs to any hard fork name, batch id: %d", batch.Index)
+		}
+		return hardForkName, nil
+	}
+	schema, err := bp.doAssignTaskWithinChunkRange(ctx, taskCtx, chunkRange, getTaskParameter, getHardForkName)
+	if schema != nil && err == nil {
+		schema.HardForkName = hardForkName
+		return schema, nil
+	}
+	return schema, err
+}
+
+// Assign load and assign batch tasks
+func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
+	taskCtx, err := bp.checkParameter(ctx, getTaskParameter)
+	if err != nil || taskCtx == nil {
+		return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
+	}
+
+	if len(getTaskParameter.VKs) > 0 {
+		return bp.assignWithTwoCircuits(ctx, taskCtx, getTaskParameter)
+	}
+	return bp.assignWithSingleCircuit(ctx, taskCtx, getTaskParameter)
+}
+
 func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.ProverTask) (*coordinatorType.GetTaskSchema, error) {
 	// get chunk from db
 	chunks, err := bp.chunkOrm.GetChunksByBatchHash(ctx, task.TaskID)
```
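The new `chunkIndexRange` helper is deliberately loose: `merge` returns the hull of the two ranges (so a batch task can be picked from either circuit's window), while `contains` is inclusive on the start index but strict on the end (`r.end > end`). A small standalone Go sketch of those semantics, with made-up fork boundaries:

```go
package main

import "fmt"

// chunkIndexRange mirrors the coordinator's helper: merge widens to the hull
// of the two ranges, and contains checks the start inclusively but the end
// strictly (r.end > end), matching the code above.
type chunkIndexRange struct{ start, end uint64 }

func (r chunkIndexRange) merge(o chunkIndexRange) chunkIndexRange {
	start, end := r.start, r.end
	if o.start < start {
		start = o.start
	}
	if o.end > end {
		end = o.end
	}
	return chunkIndexRange{start, end}
}

func (r chunkIndexRange) contains(start, end uint64) bool {
	return r.start <= start && r.end > end
}

func main() {
	// Illustrative fork windows, not real chain data.
	bernoulli := chunkIndexRange{0, 100}
	curie := chunkIndexRange{100, 1 << 62} // open-ended until the next fork

	hull := bernoulli.merge(curie)
	fmt.Println(hull.contains(40, 60))       // true: batch falls inside the merged window
	fmt.Println(bernoulli.contains(90, 100)) // false: end == r.end fails the strict end check
}
```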
```diff
@@ -39,6 +39,7 @@ func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *go
 	cp := &ChunkProverTask{
 		BaseProverTask: BaseProverTask{
 			vkMap:        vkMap,
+			reverseVkMap: reverseMap(vkMap),
 			db:           db,
 			cfg:          cfg,
 			nameForkMap:  nameForkMap,
@@ -61,20 +62,11 @@ func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *go
 	return cp
 }

-// Assign the chunk proof which need to prove
-func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
-	taskCtx, err := cp.checkParameter(ctx, getTaskParameter)
-	if err != nil || taskCtx == nil {
-		return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
-	}
+type getHardForkNameByChunkFunc func(*orm.Chunk) (string, error)

-	hardForkNumber, err := cp.getHardForkNumberByName(taskCtx.HardForkName)
-	if err != nil {
-		log.Error("chunk assign failure because of the hard fork name don't exist", "fork name", taskCtx.HardForkName)
-		return nil, err
-	}
-
-	fromBlockNum, toBlockNum := forks.BlockRange(hardForkNumber, cp.forkHeights)
+func (cp *ChunkProverTask) doAssignTaskWithinBlockRange(ctx *gin.Context, taskCtx *proverTaskContext,
+	blockRange *blockRange, getTaskParameter *coordinatorType.GetTaskParameter, getHardForkName getHardForkNameByChunkFunc) (*coordinatorType.GetTaskSchema, error) {
+	fromBlockNum, toBlockNum := blockRange.from, blockRange.to
 	if toBlockNum > getTaskParameter.ProverHeight {
 		toBlockNum = getTaskParameter.ProverHeight + 1
 	}
@@ -127,13 +119,25 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
 	}

 	log.Info("start chunk generation session", "id", chunkTask.Hash, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName)
+	var (
+		proverVersion = taskCtx.ProverVersion
+		hardForkName  = taskCtx.HardForkName
+		err           error
+	)
+	if getHardForkName != nil {
+		hardForkName, err = getHardForkName(chunkTask)
+		if err != nil {
+			log.Error("failed to get hard fork name by chunk", "error", err.Error())
+			return nil, ErrCoordinatorInternalFailure
+		}
+	}

 	proverTask := orm.ProverTask{
 		TaskID:          chunkTask.Hash,
 		ProverPublicKey: taskCtx.PublicKey,
 		TaskType:        int16(message.ProofTypeChunk),
 		ProverName:      taskCtx.ProverName,
-		ProverVersion:   taskCtx.ProverVersion,
+		ProverVersion:   proverVersion,
 		ProvingStatus:   int16(types.ProverAssigned),
 		FailureType:     int16(types.ProverTaskFailureTypeUndefined),
 		// here why need use UTC time. see scroll/common/databased/db.go
@@ -153,7 +157,7 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
 		return nil, ErrCoordinatorInternalFailure
 	}

-	cp.chunkTaskGetTaskTotal.WithLabelValues(taskCtx.HardForkName).Inc()
+	cp.chunkTaskGetTaskTotal.WithLabelValues(hardForkName).Inc()
 	cp.chunkTaskGetTaskProver.With(prometheus.Labels{
 		coordinatorType.LabelProverName:      proverTask.ProverName,
 		coordinatorType.LabelProverPublicKey: proverTask.ProverPublicKey,
@@ -163,6 +167,95 @@ func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinato
 	return taskMsg, nil
 }

+func (cp *ChunkProverTask) assignWithSingleCircuit(ctx *gin.Context, taskCtx *proverTaskContext, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
+	blockRange, err := cp.getBlockRangeByName(taskCtx.HardForkName)
+	if err != nil {
+		return nil, err
+	}
+	return cp.doAssignTaskWithinBlockRange(ctx, taskCtx, blockRange, getTaskParameter, nil)
+}
+
+func (cp *ChunkProverTask) assignWithTwoCircuits(ctx *gin.Context, taskCtx *proverTaskContext, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
+	var (
+		hardForkNames [2]string
+		blockRanges   [2]*blockRange
+		err           error
+	)
+	for i := 0; i < 2; i++ {
+		hardForkNames[i] = cp.reverseVkMap[getTaskParameter.VKs[i]]
+		blockRanges[i], err = cp.getBlockRangeByName(hardForkNames[i])
+		if err != nil {
+			return nil, err
+		}
+	}
+	blockRange, err := blockRanges[0].merge(*blockRanges[1])
+	if err != nil {
+		return nil, err
+	}
+	var hardForkName string
+	getHardForkName := func(chunk *orm.Chunk) (string, error) {
+		for i := 0; i < 2; i++ {
+			if blockRanges[i].contains(chunk.StartBlockNumber, chunk.EndBlockNumber) {
+				hardForkName = hardForkNames[i]
+				break
+			}
+		}
+		if hardForkName == "" {
+			log.Warn("get chunk not belongs to any hard fork name", "chunk id", chunk.Index)
+			return "", fmt.Errorf("get chunk not belongs to any hard fork name, chunk id: %d", chunk.Index)
+		}
+		return hardForkName, nil
+	}
+	schema, err := cp.doAssignTaskWithinBlockRange(ctx, taskCtx, blockRange, getTaskParameter, getHardForkName)
+	if schema != nil && err == nil {
+		schema.HardForkName = hardForkName
+		return schema, nil
+	}
+	return schema, err
+}
+
+type blockRange struct {
+	from uint64
+	to   uint64
+}
+
+func (r *blockRange) merge(o blockRange) (*blockRange, error) {
+	if r.from == o.to {
+		return &blockRange{o.from, r.to}, nil
+	} else if r.to == o.from {
+		return &blockRange{r.from, o.to}, nil
+	}
+	return nil, fmt.Errorf("two ranges are not adjacent")
+}
+
+func (r *blockRange) contains(start, end uint64) bool {
+	return r.from <= start && r.to > end
+}
+
+func (cp *ChunkProverTask) getBlockRangeByName(hardForkName string) (*blockRange, error) {
+	hardForkNumber, err := cp.getHardForkNumberByName(hardForkName)
+	if err != nil {
+		log.Error("chunk assign failure because of the hard fork name don't exist", "fork name", hardForkName)
+		return nil, err
+	}
+
+	fromBlockNum, toBlockNum := forks.BlockRange(hardForkNumber, cp.forkHeights)
+	return &blockRange{fromBlockNum, toBlockNum}, nil
+}
+
+// Assign the chunk proof which need to prove
+func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
+	taskCtx, err := cp.checkParameter(ctx, getTaskParameter)
+	if err != nil || taskCtx == nil {
+		return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
+	}
+
+	if len(getTaskParameter.VKs) > 0 {
+		return cp.assignWithTwoCircuits(ctx, taskCtx, getTaskParameter)
+	}
+	return cp.assignWithSingleCircuit(ctx, taskCtx, getTaskParameter)
+}
+
 func (cp *ChunkProverTask) formatProverTask(ctx context.Context, task *orm.ProverTask) (*coordinatorType.GetTaskSchema, error) {
 	// Get block hashes.
 	blockHashes, dbErr := cp.blockOrm.GetL2BlockHashesByChunkHash(ctx, task.TaskID)
```
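Note the asymmetry with the batch side: `blockRange.merge` refuses non-adjacent ranges instead of taking a hull, which fits consecutive hard forks whose block windows share a boundary. A standalone Go sketch under assumed fork heights:

```go
package main

import "fmt"

type blockRange struct{ from, to uint64 }

// merge mirrors the chunk prover helper above: the two ranges must share a
// boundary (the end of one is the start of the other), otherwise it errors.
func (r blockRange) merge(o blockRange) (blockRange, error) {
	if r.from == o.to {
		return blockRange{o.from, r.to}, nil
	} else if r.to == o.from {
		return blockRange{r.from, o.to}, nil
	}
	return blockRange{}, fmt.Errorf("two ranges are not adjacent")
}

func main() {
	// Illustrative fork heights: one circuit covers [0, 500), the next [500, 900).
	a := blockRange{0, 500}
	b := blockRange{500, 900}

	merged, err := a.merge(b) // adjacent: ok
	fmt.Println(merged, err)  // {0 900} <nil>

	_, err = a.merge(blockRange{600, 900}) // gap at [500, 600): rejected
	fmt.Println(err)
}
```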
```diff
@@ -29,14 +29,25 @@ type ProverTask interface {
 	Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error)
 }

+func reverseMap(input map[string]string) map[string]string {
+	output := make(map[string]string, len(input))
+	for k, v := range input {
+		output[v] = k
+	}
+	return output
+}
+
 // BaseProverTask a base prover task which contain series functions
 type BaseProverTask struct {
 	cfg *config.Config
 	db  *gorm.DB

-	vkMap       map[string]string
-	nameForkMap map[string]uint64
-	forkHeights []uint64
+	// key is hardForkName, value is vk
+	vkMap map[string]string
+	// key is vk, value is hardForkName
+	reverseVkMap map[string]string
+	nameForkMap  map[string]uint64
+	forkHeights  []uint64

 	batchOrm *orm.Batch
 	chunkOrm *orm.Chunk
```
```diff
@@ -74,30 +85,42 @@ func (b *BaseProverTask) checkParameter(ctx *gin.Context, getTaskParameter *coor
 	}
 	ptc.ProverVersion = proverVersion.(string)

-	hardForkName, hardForkNameExist := ctx.Get(coordinatorType.HardForkName)
-	if !hardForkNameExist {
-		return nil, fmt.Errorf("get hard fork name from context failed")
-	}
-	ptc.HardForkName = hardForkName.(string)
-
 	if !version.CheckScrollRepoVersion(proverVersion.(string), b.cfg.ProverManager.MinProverVersion) {
 		return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", b.cfg.ProverManager.MinProverVersion, proverVersion.(string))
 	}

-	vk, vkExist := b.vkMap[ptc.HardForkName]
-	if !vkExist {
-		return nil, fmt.Errorf("can't get vk for hard fork:%s, vkMap:%v", ptc.HardForkName, b.vkMap)
-	}
-
-	// if the prover has a different vk
-	if getTaskParameter.VK != vk {
-		log.Error("vk inconsistency", "prover vk", getTaskParameter.VK, "vk", vk, "hardForkName", ptc.HardForkName)
-		// if the prover reports a different prover version
-		if !version.CheckScrollProverVersion(proverVersion.(string)) {
-			return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", version.Version, proverVersion.(string))
+	// signals that the prover is multi-circuits version
+	if len(getTaskParameter.VKs) > 0 {
+		if len(getTaskParameter.VKs) != 2 {
+			return nil, fmt.Errorf("parameter vks length must be 2")
+		}
+		for _, vk := range getTaskParameter.VKs {
+			if _, exists := b.reverseVkMap[vk]; !exists {
+				return nil, fmt.Errorf("incompatible vk. vk %s is invalid", vk)
+			}
+		}
+	} else {
+		hardForkName, hardForkNameExist := ctx.Get(coordinatorType.HardForkName)
+		if !hardForkNameExist {
+			return nil, fmt.Errorf("get hard fork name from context failed")
+		}
+		ptc.HardForkName = hardForkName.(string)
+
+		vk, vkExist := b.vkMap[ptc.HardForkName]
+		if !vkExist {
+			return nil, fmt.Errorf("can't get vk for hard fork:%s, vkMap:%v", ptc.HardForkName, b.vkMap)
+		}
+
+		// if the prover has a different vk
+		if getTaskParameter.VK != vk {
+			log.Error("vk inconsistency", "prover vk", getTaskParameter.VK, "vk", vk, "hardForkName", ptc.HardForkName)
+			// if the prover reports a different prover version
+			if !version.CheckScrollProverVersion(proverVersion.(string)) {
+				return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", version.Version, proverVersion.(string))
+			}
+			// if the prover reports a same prover version
+			return nil, fmt.Errorf("incompatible vk. please check your params files or config files")
 		}
-		// if the prover reports a same prover version
-		return nil, fmt.Errorf("incompatible vk. please check your params files or config files")
 	}

 	isBlocked, err := b.proverBlockListOrm.IsPublicKeyBlocked(ctx.Copy(), publicKey.(string))
```
```diff
@@ -134,7 +134,12 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
 	if len(pv) == 0 {
 		return fmt.Errorf("get ProverVersion from context failed")
 	}
-	hardForkName := ctx.GetString(coordinatorType.HardForkName)
+	// use hard_fork_name from parameter first
+	// if prover support multi hard_forks, the real hard_fork_name is not set to the gin context
+	hardForkName := proofParameter.HardForkName
+	if hardForkName == "" {
+		hardForkName = ctx.GetString(coordinatorType.HardForkName)
+	}

 	var proverTask *orm.ProverTask
 	var err error
```
Binary file not shown.
Binary file not shown.
```diff
@@ -3,8 +3,8 @@
 package verifier

 /*
-#cgo LDFLAGS: -lzkp -lm -ldl -lzktrie -L${SRCDIR}/lib/ -Wl,-rpath,${SRCDIR}/lib
-#cgo gpu LDFLAGS: -lzkp -lm -ldl -lgmp -lstdc++ -lprocps -lzktrie -L/usr/local/cuda/lib64/ -lcudart -L${SRCDIR}/lib/ -Wl,-rpath,${SRCDIR}/lib
+#cgo LDFLAGS: -lzkp -lm -ldl -lzktrie -L${SRCDIR}/lib/ -Wl,-rpath=${SRCDIR}/lib
+#cgo gpu LDFLAGS: -lzkp -lm -ldl -lgmp -lstdc++ -lprocps -lzktrie -L/usr/local/cuda/lib64/ -lcudart -L${SRCDIR}/lib/ -Wl,-rpath=${SRCDIR}/lib
 #include <stdlib.h>
 #include "./lib/libzkp.h"
 */
@@ -164,8 +164,8 @@ func (v *Verifier) loadEmbedVK() error {
 		return err
 	}

-	v.BatchVKMap["shanghai"] = base64.StdEncoding.EncodeToString(batchVKBytes)
-	v.ChunkVKMap["shanghai"] = base64.StdEncoding.EncodeToString(chunkVkBytes)
+	v.BatchVKMap["bernoulli"] = base64.StdEncoding.EncodeToString(batchVKBytes)
+	v.ChunkVKMap["bernoulli"] = base64.StdEncoding.EncodeToString(chunkVkBytes)
 	v.BatchVKMap[""] = base64.StdEncoding.EncodeToString(batchVKBytes)
 	v.ChunkVKMap[""] = base64.StdEncoding.EncodeToString(chunkVkBytes)
 	return nil
```
```diff
@@ -2,15 +2,17 @@ package types

 // GetTaskParameter for ProverTasks request parameter
 type GetTaskParameter struct {
-	ProverHeight uint64 `form:"prover_height" json:"prover_height"`
-	TaskType     int    `form:"task_type" json:"task_type"`
-	VK           string `form:"vk" json:"vk"`
+	ProverHeight uint64   `form:"prover_height" json:"prover_height"`
+	TaskType     int      `form:"task_type" json:"task_type"`
+	VK           string   `form:"vk" json:"vk"` // will be deprecated after all go_prover offline
+	VKs          []string `form:"vks" json:"vks"` // for rust_prover that supporting multi-circuits
 }

 // GetTaskSchema the schema data return to prover for get prover task
 type GetTaskSchema struct {
-	UUID     string `json:"uuid"`
-	TaskID   string `json:"task_id"`
-	TaskType int    `json:"task_type"`
-	TaskData string `json:"task_data"`
+	UUID         string `json:"uuid"`
+	TaskID       string `json:"task_id"`
+	TaskType     int    `json:"task_type"`
+	TaskData     string `json:"task_data"`
+	HardForkName string `json:"hard_fork_name"`
 }
```
@@ -3,11 +3,12 @@ package types
|
||||
// SubmitProofParameter the SubmitProof api request parameter
|
||||
type SubmitProofParameter struct {
|
||||
// TODO when prover have upgrade, need change this field to required
|
||||
UUID string `form:"uuid" json:"uuid"`
|
||||
TaskID string `form:"task_id" json:"task_id" binding:"required"`
|
||||
TaskType int `form:"task_type" json:"task_type" binding:"required"`
|
||||
Status int `form:"status" json:"status"`
|
||||
Proof string `form:"proof" json:"proof"`
|
||||
FailureType int `form:"failure_type" json:"failure_type"`
|
||||
FailureMsg string `form:"failure_msg" json:"failure_msg"`
|
||||
UUID string `form:"uuid" json:"uuid"`
|
||||
TaskID string `form:"task_id" json:"task_id" binding:"required"`
|
||||
TaskType int `form:"task_type" json:"task_type" binding:"required"`
|
||||
Status int `form:"status" json:"status"`
|
||||
Proof string `form:"proof" json:"proof"`
|
||||
FailureType int `form:"failure_type" json:"failure_type"`
|
||||
FailureMsg string `form:"failure_msg" json:"failure_msg"`
|
||||
HardForkName string `form:"hard_fork_name" json:"hard_fork_name"`
|
||||
}
|
||||
|
||||
@@ -1,74 +0,0 @@
[
{
"anonymous": false,
"inputs": [
{
"indexed": false,
"internalType": "uint256",
"name": "basefee",
"type": "uint256"
}
],
"name": "BaseFeeSuccess",
"type": "event"
},
{
"anonymous": false,
"inputs": [
{
"indexed": false,
"internalType": "bytes32",
"name": "data",
"type": "bytes32"
}
],
"name": "McopySuccess",
"type": "event"
},
{
"anonymous": false,
"inputs": [
{
"indexed": false,
"internalType": "uint256",
"name": "value",
"type": "uint256"
}
],
"name": "TloadSuccess",
"type": "event"
},
{
"anonymous": false,
"inputs": [],
"name": "TstoreSuccess",
"type": "event"
},
{
"inputs": [],
"name": "useBaseFee",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [],
"name": "useMcopy",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
},
{
"inputs": [
{
"internalType": "uint256",
"name": "newValue",
"type": "uint256"
}
],
"name": "useTloadTstore",
"outputs": [],
"stateMutability": "nonpayable",
"type": "function"
}
]
@@ -1,221 +0,0 @@
package main

import (
"context"
"crypto/ecdsa"
"fmt"
"math/big"
"os"
"strings"
"time"

"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
)

func main() {
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
glogger.Verbosity(log.LvlInfo)
log.Root().SetHandler(glogger)

privateKey, err := crypto.HexToECDSA(strings.TrimPrefix(os.Getenv("L2_DEPLOYER_PRIVATE_KEY"), "0x"))
if err != nil {
log.Crit("failed to create private key", "err", err)
}
publicKey := privateKey.Public()
publicKeyECDSA, ok := publicKey.(*ecdsa.PublicKey)
if !ok {
log.Crit("failed to cast public key to ECDSA")
}
fromAddress := crypto.PubkeyToAddress(*publicKeyECDSA)

client, err := ethclient.Dial(os.Getenv("SCROLL_L2_DEPLOYMENT_RPC"))
if err != nil {
log.Crit("failed to connect to network", "err", err)
}

auth, err := bind.NewKeyedTransactorWithChainID(privateKey, new(big.Int).SetUint64(222222))
if err != nil {
log.Crit("failed to initialize keyed transactor with chain ID", "err", err)
}

abiJSON, err := os.ReadFile("abi.json")
if err != nil {
log.Crit("failed to read ABI file", "err", err)
}

l2TestCurieOpcodesMetaData := &bind.MetaData{ABI: string(abiJSON)}
l2TestCurieOpcodesAbi, err := l2TestCurieOpcodesMetaData.GetAbi()
if err != nil {
log.Crit("failed to get abi", "err", err)
}

nonce, err := client.PendingNonceAt(context.Background(), fromAddress)
if err != nil {
log.Crit("failed to get pending nonce", "err", err)
}

useTloadTstoreCalldata, err := l2TestCurieOpcodesAbi.Pack("useTloadTstore", new(big.Int).SetUint64(9876543210))
if err != nil {
log.Crit("failed to pack useTloadTstore calldata", "err", err)
}

useMcopyCalldata, err := l2TestCurieOpcodesAbi.Pack("useMcopy")
if err != nil {
log.Crit("failed to pack useMcopy calldata", "err", err)
}

useBaseFee, err := l2TestCurieOpcodesAbi.Pack("useBaseFee")
if err != nil {
log.Crit("failed to pack useBaseFee calldata", "err", err)
}

l2TestCurieOpcodesAddr := common.HexToAddress(os.Getenv("L2_TEST_CURIE_OPCODES_ADDR"))

txTypes := []int{
LegacyTxType,
AccessListTxType,
DynamicFeeTxType,
}

accessLists := []types.AccessList{
nil,
{
{Address: common.HexToAddress("0x0000000000000000000000000000000000000000"), StorageKeys: []common.Hash{
common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"),
}},
},
{
{Address: common.HexToAddress("0x1000000000000000000000000000000000000000"), StorageKeys: []common.Hash{
common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001")}},
},
{
{Address: common.HexToAddress("0x2000000000000000000000000000000000000000"), StorageKeys: []common.Hash{
common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000002"),
common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000003"),
}},
{Address: common.HexToAddress("0x3000000000000000000000000000000000000000"), StorageKeys: []common.Hash{
common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000004"),
}},
},
{
{Address: common.HexToAddress("0x4000000000000000000000000000000000000000"), StorageKeys: []common.Hash{
common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000005"),
common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000005"), // repetitive storage key
}},
},
}

for i := 0; i < 50; i++ {
for _, txType := range txTypes {
for _, accessList := range accessLists {
if err := sendTransaction(client, auth, txType, &l2TestCurieOpcodesAddr, nonce, accessList, nil, useTloadTstoreCalldata); err != nil {
log.Crit("failed to send transaction", "nonce", nonce, "err", err)
}
nonce += 1

if err := sendTransaction(client, auth, txType, &l2TestCurieOpcodesAddr, nonce, accessList, nil, useMcopyCalldata); err != nil {
log.Crit("failed to send transaction", "nonce", nonce, "err", err)
}
nonce += 1

if err := sendTransaction(client, auth, txType, &l2TestCurieOpcodesAddr, nonce, accessList, nil, useBaseFee); err != nil {
log.Crit("failed to send transaction", "nonce", nonce, "err", err)
}
nonce += 1

if err := sendTransaction(client, auth, txType, &fromAddress, nonce, accessList, nil, []byte{0x01, 0x02, 0x03, 0x04}); err != nil {
log.Crit("failed to send transaction", "nonce", nonce, "err", err)
}
nonce += 1

if err := sendTransaction(client, auth, txType, &fromAddress, nonce, accessList, new(big.Int).SetUint64(1), []byte{0x01, 0x02, 0x03, 0x04}); err != nil {
log.Crit("failed to send transaction", "nonce", nonce, "err", err)
}
nonce += 1

if err := sendTransaction(client, auth, txType, &fromAddress, nonce, accessList, new(big.Int).SetUint64(1), nil); err != nil {
log.Crit("failed to send transaction", "nonce", nonce, "err", err)
}
nonce += 1
}
}
}
}

const (
LegacyTxType = 1
AccessListTxType = 2
DynamicFeeTxType = 3
)

func sendTransaction(client *ethclient.Client, auth *bind.TransactOpts, txType int, to *common.Address, nonce uint64, accessList types.AccessList, value *big.Int, data []byte) error {
var txData types.TxData
switch txType {
case LegacyTxType:
txData = &types.LegacyTx{
Nonce: nonce,
GasPrice: new(big.Int).SetUint64(1000000000),
Gas: 300000,
To: to,
Value: value,
Data: data,
}
case AccessListTxType:
txData = &types.AccessListTx{
ChainID: new(big.Int).SetUint64(222222),
Nonce: nonce,
GasPrice: new(big.Int).SetUint64(1000000000),
Gas: 300000,
To: to,
Value: value,
Data: data,
AccessList: accessList,
}
case DynamicFeeTxType:
txData = &types.DynamicFeeTx{
ChainID: new(big.Int).SetUint64(222222),
Nonce: nonce,
GasTipCap: new(big.Int).SetUint64(1000000000),
GasFeeCap: new(big.Int).SetUint64(1000000000),
Gas: 300000,
To: to,
Value: value,
Data: data,
AccessList: accessList,
}
default:
return fmt.Errorf("invalid transaction type: %d", txType)
}

signedTx, err := auth.Signer(auth.From, types.NewTx(txData))
if err != nil {
return fmt.Errorf("failed to sign tx: %w", err)
}

if err = client.SendTransaction(context.Background(), signedTx); err != nil {
return fmt.Errorf("failed to send tx: %w", err)
}

log.Info("transaction sent", "txHash", signedTx.Hash().Hex())

var receipt *types.Receipt
for {
receipt, err = client.TransactionReceipt(context.Background(), signedTx.Hash())
if err == nil {
if receipt.Status != types.ReceiptStatusSuccessful {
return fmt.Errorf("transaction failed: %s", signedTx.Hash().Hex())
}
break
}
log.Warn("waiting for receipt", "txHash", signedTx.Hash())
time.Sleep(2 * time.Second)
}

log.Info("Sent transaction", "txHash", signedTx.Hash().Hex(), "from", auth.From.Hex(), "nonce", signedTx.Nonce(), "to", to.Hex())
return nil
}
@@ -137,7 +137,6 @@ github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/AzureAD/microsoft-authentication-library-for-go v0.6.0/go.mod h1:BDJ5qMFKx9DugEg3+uQSDCdbYPr5s9vBTrL9P8TpqOU=
github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
@@ -262,7 +261,6 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g=
github.com/ethereum/go-ethereum v1.10.26 h1:i/7d9RBBwiXCEuyduBQzJw/mKmnvzsN14jqBmytw72s=
github.com/ethereum/go-ethereum v1.10.26/go.mod h1:EYFyF19u3ezGLD4RqOkLq+ZCXzYbLoNDdZlMt7kyKFg=
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
@@ -321,7 +319,6 @@ github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1C
github.com/jmhodges/clock v1.2.0/go.mod h1:qKjhA7x7u/lQpPB1XAqX1b1lCI/w3/fNuYpI/ZjLynI=
github.com/josephspurrier/goversioninfo v1.4.0/go.mod h1:JWzv5rKQr+MmW+LvM412ToT/IkYDZjaclF2pKDss8IY=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/karalabe/usb v0.0.0-20211005121534-4c5740d64559/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE=
@@ -381,8 +378,8 @@ github.com/sclevine/agouti v3.0.0+incompatible h1:8IBJS6PWz3uTlMP3YBIR5f+KAldcGu
github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw=
github.com/scroll-tech/da-codec v0.0.0-20240515170552-2e5286688fce h1:SN43TBs7VaJt9q737eWWqGz0OCg4v+PtUn3RbJcG1o0=
github.com/scroll-tech/da-codec v0.0.0-20240515170552-2e5286688fce/go.mod h1:1wWYii0OPwd5kw+xrz0PFgS420xNadrNF1x/ELJT+TM=
github.com/scroll-tech/da-codec v0.0.0-20240524071411-769db9f7e703 h1:dcvPPyyfe3SocOBwgww3e1wcWjgF85kKDsohY4TXII0=
github.com/scroll-tech/da-codec v0.0.0-20240524071411-769db9f7e703/go.mod h1:1wWYii0OPwd5kw+xrz0PFgS420xNadrNF1x/ELJT+TM=
github.com/scroll-tech/da-codec v0.0.0-20240527110002-d7bcb621a61a h1:5uWQHo/+cXexQGeSrywtXM2z29zRFctmux2vXs3JLrM=
github.com/scroll-tech/da-codec v0.0.0-20240527110002-d7bcb621a61a/go.mod h1:1wWYii0OPwd5kw+xrz0PFgS420xNadrNF1x/ELJT+TM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221202061207-804e7edc23ba/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221213034543-78c1f57fcfea/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
@@ -518,7 +515,6 @@ google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11
google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns=
gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=
@@ -3,8 +3,8 @@
package core

/*
#cgo LDFLAGS: -lzkp -lm -ldl -lzktrie -L${SRCDIR}/lib/ -Wl,-rpath,${SRCDIR}/lib
#cgo gpu LDFLAGS: -lzkp -lm -ldl -lgmp -lstdc++ -lprocps -lzktrie -L/usr/local/cuda/lib64/ -lcudart -L${SRCDIR}/lib/ -Wl,-rpath,${SRCDIR}/lib
#cgo LDFLAGS: -lzkp -lm -ldl -lzktrie -L${SRCDIR}/lib/ -Wl,-rpath=${SRCDIR}/lib
#cgo gpu LDFLAGS: -lzkp -lm -ldl -lgmp -lstdc++ -lprocps -lzktrie -L/usr/local/cuda/lib64/ -lcudart -L${SRCDIR}/lib/ -Wl,-rpath=${SRCDIR}/lib
#include <stdlib.h>
#include "./lib/libzkp.h"
*/

@@ -2,7 +2,7 @@

IMAGE_VERSION=latest
REPO_ROOT_DIR=./..
LIBSCROLL_ZSTD_VERSION=v0.0.0-rc0-ubuntu20.04
LIBSCROLL_ZSTD_VERSION=v0.1.0-rc0-ubuntu20.04
SCROLL_LIB_PATH=/scroll/lib

mock_abi:
@@ -11,8 +11,7 @@ mock_abi:

libzstd:
sudo mkdir -p $(SCROLL_LIB_PATH)/
sudo wget -O $(SCROLL_LIB_PATH)/libzktrie.so https://github.com/scroll-tech/da-codec/releases/download/v0.0.0-rc0-ubuntu20.04/libzktrie.so
sudo wget -O $(SCROLL_LIB_PATH)/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/v0.0.0-rc0-ubuntu20.04/libscroll_zstd.so
sudo wget -O $(SCROLL_LIB_PATH)/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/v0.1.0-rc0-ubuntu20.04/libscroll_zstd.so

rollup_bins: libzstd ## Builds the Rollup bins.
export LD_LIBRARY_PATH=$$LD_LIBRARY_PATH:$(SCROLL_LIB_PATH) && \
@@ -101,7 +101,7 @@ var L2MessageQueueMetaData = &bind.MetaData{

// L1GasPriceOracleMetaData contains all meta data concerning the L1GasPriceOracle contract.
var L1GasPriceOracleMetaData = &bind.MetaData{
ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_owner\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"l1BaseFee\",\"type\":\"uint256\"}],\"name\":\"L1BaseFeeUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"overhead\",\"type\":\"uint256\"}],\"name\":\"OverheadUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"_oldOwner\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"_newOwner\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"scalar\",\"type\":\"uint256\"}],\"name\":\"ScalarUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"_oldWhitelist\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"_newWhitelist\",\"type\":\"address\"}],\"name\":\"UpdateWhitelist\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"getL1Fee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"getL1GasUsed\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"l1BaseFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"overhead\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"renounceOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"scalar\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_l1BaseFee\",\"type\":\"uint256\"}],\"name\":\"setL1BaseFee\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_overhead\",\"type\":\"uint256\"}],\"name\":\"setOverhead\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_scalar\",\"type\":\"uint256\"}],\"name\":\"setScalar\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_newOwner\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_newWhitelist\",\"type\":\"address\"}],\"name\":\"updateWhitelist\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"whitelist\",\"outputs\":[{\"internalType\":\"contract IWhitelist\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]\n",
ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"scalar\",\"type\":\"uint256\"}],\"name\":\"BlobScalarUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"scalar\",\"type\":\"uint256\"}],\"name\":\"CommitScalarUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"l1BaseFee\",\"type\":\"uint256\"}],\"name\":\"L1BaseFeeUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"l1BlobBaseFee\",\"type\":\"uint256\"}],\"name\":\"L1BlobBaseFeeUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"overhead\",\"type\":\"uint256\"}],\"name\":\"OverheadUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"scalar\",\"type\":\"uint256\"}],\"name\":\"ScalarUpdated\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"blobScalar\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"commitScalar\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"getL1Fee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"getL1GasUsed\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"l1BaseFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"l1BlobBaseFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"overhead\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"scalar\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_l1BaseFee\",\"type\":\"uint256\"}],\"name\":\"setL1BaseFee\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_l1BaseFee\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_l1BlobBaseFee\",\"type\":\"uint256\"}],\"name\":\"setL1BaseFeeAndBlobBaseFee\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]",
}

// IL1ScrollMessengerL2MessageProof is an auto generated low-level Go binding around an user-defined struct.
@@ -70,13 +70,15 @@
"max_l1_commit_calldata_size_per_chunk": 112345,
"chunk_timeout_sec": 300,
"max_row_consumption_per_chunk": 1048319,
"gas_cost_increase_multiplier": 1.2
"gas_cost_increase_multiplier": 1.2,
"max_uncompressed_batch_bytes_size": 634880
},
"batch_proposer_config": {
"max_l1_commit_gas_per_batch": 11234567,
"max_l1_commit_calldata_size_per_batch": 112345,
"batch_timeout_sec": 300,
"gas_cost_increase_multiplier": 1.2
"gas_cost_increase_multiplier": 1.2,
"max_uncompressed_batch_bytes_size": 634880
}
},
"db_config": {

@@ -10,7 +10,7 @@ require (
github.com/go-resty/resty/v2 v2.7.0
github.com/holiman/uint256 v1.2.4
github.com/prometheus/client_golang v1.16.0
github.com/scroll-tech/da-codec v0.0.0-20240524071411-769db9f7e703
github.com/scroll-tech/da-codec v0.0.0-20240527110002-d7bcb621a61a
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea
github.com/smartystreets/goconvey v1.8.0
github.com/stretchr/testify v1.9.0

@@ -236,8 +236,8 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/da-codec v0.0.0-20240524071411-769db9f7e703 h1:dcvPPyyfe3SocOBwgww3e1wcWjgF85kKDsohY4TXII0=
github.com/scroll-tech/da-codec v0.0.0-20240524071411-769db9f7e703/go.mod h1:1wWYii0OPwd5kw+xrz0PFgS420xNadrNF1x/ELJT+TM=
github.com/scroll-tech/da-codec v0.0.0-20240527110002-d7bcb621a61a h1:5uWQHo/+cXexQGeSrywtXM2z29zRFctmux2vXs3JLrM=
github.com/scroll-tech/da-codec v0.0.0-20240527110002-d7bcb621a61a/go.mod h1:1wWYii0OPwd5kw+xrz0PFgS420xNadrNF1x/ELJT+TM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea h1:CH1WXWrpEpLaP3N+bFs2a1xdE0+lRm1AuJQb5YvE6Ls=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea/go.mod h1:i4VBgWoaW/y0D8MmQb7hSOulyw1dKhuiSFAbznwivCA=
github.com/scroll-tech/zktrie v0.8.2 h1:UMuIfA+jdgWMLmTgTL64Emo+zzMOdcnH0+eYdDcshxQ=
@@ -33,6 +33,7 @@ type ChunkProposerConfig struct {
ChunkTimeoutSec uint64 `json:"chunk_timeout_sec"`
MaxRowConsumptionPerChunk uint64 `json:"max_row_consumption_per_chunk"`
GasCostIncreaseMultiplier float64 `json:"gas_cost_increase_multiplier"`
MaxUncompressedBatchBytesSize uint64 `json:"max_uncompressed_batch_bytes_size"`
}

// BatchProposerConfig loads batch_proposer configuration items.
@@ -41,4 +42,5 @@ type BatchProposerConfig struct {
MaxL1CommitCalldataSizePerBatch uint64 `json:"max_l1_commit_calldata_size_per_batch"`
BatchTimeoutSec uint64 `json:"batch_timeout_sec"`
GasCostIncreaseMultiplier float64 `json:"gas_cost_increase_multiplier"`
MaxUncompressedBatchBytesSize uint64 `json:"max_uncompressed_batch_bytes_size"`
}

@@ -36,7 +36,8 @@ type Layer1Relayer struct {
gasOracleSender *sender.Sender
l1GasOracleABI *abi.ABI

lastGasPrice uint64
lastBaseFee uint64
lastBlobBaseFee uint64
minGasPrice uint64
gasPriceDiff uint64
l1BaseFeeWeight float64
@@ -130,37 +131,45 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
block := blocks[0]

if types.GasOracleStatus(block.GasOracleStatus) == types.GasOraclePending {
expectedDelta := r.lastGasPrice * r.gasPriceDiff / gasPriceDiffPrecision
if r.lastGasPrice > 0 && expectedDelta == 0 {
expectedDelta = 1
}

latestL2Height, err := r.l2BlockOrm.GetL2BlocksLatestHeight(r.ctx)
if err != nil {
log.Warn("Failed to fetch latest L2 block height from db", "err", err)
return
}

var isBernoulli = r.chainCfg.IsBernoulli(new(big.Int).SetUint64(latestL2Height))
var isBernoulli = block.BlobBaseFee > 0 && r.chainCfg.IsBernoulli(new(big.Int).SetUint64(latestL2Height))
var isCurie = block.BlobBaseFee > 0 && r.chainCfg.IsCurie(new(big.Int).SetUint64(latestL2Height))

var baseFee uint64
if isBernoulli && block.BlobBaseFee != 0 {
var blobBaseFee uint64
if isCurie {
baseFee = block.BaseFee
blobBaseFee = block.BlobBaseFee
} else if isBernoulli {
baseFee = uint64(math.Ceil(r.l1BaseFeeWeight*float64(block.BaseFee) + r.l1BlobBaseFeeWeight*float64(block.BlobBaseFee)))
} else {
baseFee = block.BaseFee
}

// last is undefined or (baseFee >= minGasPrice && exceed diff)
if r.lastGasPrice == 0 || (baseFee >= r.minGasPrice && (baseFee >= r.lastGasPrice+expectedDelta || baseFee <= r.lastGasPrice-expectedDelta)) {
data, err := r.l1GasOracleABI.Pack("setL1BaseFee", new(big.Int).SetUint64(baseFee))
if err != nil {
log.Error("Failed to pack setL1BaseFee", "block.Hash", block.Hash, "block.Height", block.Number, "block.BaseFee", baseFee, "isBernoulli", isBernoulli, "err", err)
return
if r.shouldUpdateGasOracle(baseFee, blobBaseFee, isCurie) {
var data []byte
if isCurie {
data, err = r.l1GasOracleABI.Pack("setL1BaseFeeAndBlobBaseFee", new(big.Int).SetUint64(baseFee), new(big.Int).SetUint64(blobBaseFee))
if err != nil {
log.Error("Failed to pack setL1BaseFeeAndBlobBaseFee", "block.Hash", block.Hash, "block.Height", block.Number, "block.BaseFee", baseFee, "block.BlobBaseFee", blobBaseFee, "isBernoulli", isBernoulli, "isCurie", isCurie, "err", err)
return
}
} else {
data, err = r.l1GasOracleABI.Pack("setL1BaseFee", new(big.Int).SetUint64(baseFee))
if err != nil {
log.Error("Failed to pack setL1BaseFee", "block.Hash", block.Hash, "block.Height", block.Number, "block.BaseFee", baseFee, "block.BlobBaseFee", blobBaseFee, "isBernoulli", isBernoulli, "isCurie", isCurie, "err", err)
return
}
}

hash, err := r.gasOracleSender.SendTransaction(block.Hash, &r.cfg.GasPriceOracleContractAddress, data, nil, 0)
if err != nil {
log.Error("Failed to send setL1BaseFee tx to layer2 ", "block.Hash", block.Hash, "block.Height", block.Number, "err", err)
log.Error("Failed to send gas oracle update tx to layer2", "block.Hash", block.Hash, "block.Height", block.Number, "block.BaseFee", baseFee, "block.BlobBaseFee", blobBaseFee, "isBernoulli", isBernoulli, "isCurie", isCurie, "err", err)
return
}

@@ -169,9 +178,12 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
log.Error("UpdateGasOracleStatusAndOracleTxHash failed", "block.Hash", block.Hash, "block.Height", block.Number, "err", err)
return
}
r.lastGasPrice = baseFee
r.metrics.rollupL1RelayerLastGasPrice.Set(float64(r.lastGasPrice))
log.Info("Update l1 base fee", "txHash", hash.String(), "baseFee", baseFee, "isBernoulli", isBernoulli)

r.lastBaseFee = baseFee
r.lastBlobBaseFee = blobBaseFee
r.metrics.rollupL1RelayerLatestBaseFee.Set(float64(r.lastBaseFee))
r.metrics.rollupL1RelayerLatestBlobBaseFee.Set(float64(r.lastBlobBaseFee))
log.Info("Update l1 base fee", "txHash", hash.String(), "baseFee", baseFee, "blobBaseFee", blobBaseFee, "isBernoulli", isBernoulli, "isCurie", isCurie)
}
}
}
@@ -219,3 +231,34 @@ func (r *Layer1Relayer) StopSenders() {
r.gasOracleSender.Stop()
}
}
func (r *Layer1Relayer) shouldUpdateGasOracle(baseFee uint64, blobBaseFee uint64, isCurie bool) bool {
// Right after restarting.
if r.lastBaseFee == 0 {
return true
}

expectedBaseFeeDelta := r.lastBaseFee*r.gasPriceDiff/gasPriceDiffPrecision + 1
if baseFee >= r.minGasPrice && (baseFee >= r.lastBaseFee+expectedBaseFeeDelta || baseFee+expectedBaseFeeDelta <= r.lastBaseFee) {
return true
}

// Omitting blob base fee checks before Curie.
if !isCurie {
return false
}

// Right after enabling Curie.
if r.lastBlobBaseFee == 0 {
return true
}

expectedBlobBaseFeeDelta := r.lastBlobBaseFee * r.gasPriceDiff / gasPriceDiffPrecision
// Plus a minimum of 0.01 gwei, since the blob base fee is usually low, preventing short-time fluctuation.
expectedBlobBaseFeeDelta += 10000000
if blobBaseFee >= r.minGasPrice && (blobBaseFee >= r.lastBlobBaseFee+expectedBlobBaseFeeDelta || blobBaseFee+expectedBlobBaseFeeDelta <= r.lastBlobBaseFee) {
return true
}

return false
}
@@ -9,7 +9,8 @@ import (

type l1RelayerMetrics struct {
rollupL1RelayerGasPriceOraclerRunTotal prometheus.Counter
rollupL1RelayerLastGasPrice prometheus.Gauge
rollupL1RelayerLatestBaseFee prometheus.Gauge
rollupL1RelayerLatestBlobBaseFee prometheus.Gauge
rollupL1UpdateGasOracleConfirmedTotal prometheus.Counter
rollupL1UpdateGasOracleConfirmedFailedTotal prometheus.Counter
}
@@ -26,9 +27,13 @@ func initL1RelayerMetrics(reg prometheus.Registerer) *l1RelayerMetrics {
Name: "rollup_layer1_gas_price_oracler_total",
Help: "The total number of layer1 gas price oracler run total",
}),
rollupL1RelayerLastGasPrice: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_layer1_gas_price_latest_gas_price",
Help: "The latest gas price of rollup relayer l1",
rollupL1RelayerLatestBaseFee: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_layer1_latest_base_fee",
Help: "The latest base fee of l1 rollup relayer",
}),
rollupL1RelayerLatestBlobBaseFee: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_layer1_latest_blob_base_fee",
Help: "The latest blob base fee of l1 rollup relayer",
}),
rollupL1UpdateGasOracleConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_layer1_update_gas_oracle_confirmed_total",
@@ -32,7 +32,7 @@ func setupL1RelayerDB(t *testing.T) *gorm.DB {
return db
}

// testCreateNewRelayer test create new relayer instance and stop
// testCreateNewL1Relayer test create new relayer instance and stop
func testCreateNewL1Relayer(t *testing.T) {
db := setupL1RelayerDB(t)
defer database.CloseDB(db)
@@ -311,7 +311,7 @@ func (r *Layer2Relayer) ProcessGasPriceOracle() {
}

// last is undefined or (suggestGasPriceUint64 >= minGasPrice && exceed diff)
if r.lastGasPrice == 0 || (suggestGasPriceUint64 >= r.minGasPrice && (suggestGasPriceUint64 >= r.lastGasPrice+expectedDelta || suggestGasPriceUint64 <= r.lastGasPrice-expectedDelta)) {
if r.lastGasPrice == 0 || (suggestGasPriceUint64 >= r.minGasPrice && (suggestGasPriceUint64 >= r.lastGasPrice+expectedDelta || suggestGasPriceUint64+expectedDelta <= r.lastGasPrice)) {
data, err := r.l2GasOracleABI.Pack("setL2BaseFee", suggestGasPrice)
if err != nil {
log.Error("Failed to pack setL2BaseFee", "batch.Hash", batch.Hash, "GasPrice", suggestGasPrice.Uint64(), "err", err)
@@ -319,10 +319,10 @@ func testAccessListTransactionGasLimit(t *testing.T) {
assert.NoError(t, err)

if txType == LegacyTxType { // Legacy transactions can not have an access list.
assert.Equal(t, uint64(43956), gasLimit)
assert.Equal(t, uint64(43935), gasLimit)
assert.Nil(t, accessList)
} else { // Dynamic fee and blob transactions can have an access list.
assert.Equal(t, uint64(43479), gasLimit)
assert.Equal(t, uint64(43458), gasLimit)
assert.NotNil(t, accessList)
}
@@ -34,6 +34,7 @@ type BatchProposer struct {
maxL1CommitCalldataSizePerBatch uint64
batchTimeoutSec uint64
gasCostIncreaseMultiplier float64
maxUncompressedBatchBytesSize uint64
forkMap map[uint64]bool

chainCfg *params.ChainConfig
@@ -61,6 +62,7 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, chai
"maxL1CommitCalldataSizePerBatch", cfg.MaxL1CommitCalldataSizePerBatch,
"batchTimeoutSec", cfg.BatchTimeoutSec,
"gasCostIncreaseMultiplier", cfg.GasCostIncreaseMultiplier,
"maxUncompressedBatchBytesSize", cfg.MaxUncompressedBatchBytesSize,
"forkHeights", forkHeights)

p := &BatchProposer{
@@ -73,6 +75,7 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, chai
maxL1CommitCalldataSizePerBatch: cfg.MaxL1CommitCalldataSizePerBatch,
batchTimeoutSec: cfg.BatchTimeoutSec,
gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
maxUncompressedBatchBytesSize: cfg.MaxUncompressedBatchBytesSize,
forkMap: forkMap,
chainCfg: chainCfg,

@@ -235,13 +238,12 @@ func (p *BatchProposer) proposeBatch() error {
p.recordTimerBatchMetrics(metrics)

totalOverEstimateL1CommitGas := uint64(p.gasCostIncreaseMultiplier * float64(metrics.L1CommitGas))
if metrics.L1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch ||
totalOverEstimateL1CommitGas > p.maxL1CommitGasPerBatch ||
metrics.L1CommitBlobSize > maxBlobSize {
if metrics.L1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch || totalOverEstimateL1CommitGas > p.maxL1CommitGasPerBatch ||
metrics.L1CommitBlobSize > maxBlobSize || metrics.L1CommitUncompressedBatchBytesSize > p.maxUncompressedBatchBytesSize {
if i == 0 {
// The first chunk exceeds hard limits, which indicates a bug in the chunk-proposer, manual fix is needed.
return fmt.Errorf("the first chunk exceeds limits; start block number: %v, end block number: %v, limits: %+v, maxChunkNum: %v, maxL1CommitCalldataSize: %v, maxL1CommitGas: %v, maxBlobSize: %v",
dbChunks[0].StartBlockNumber, dbChunks[0].EndBlockNumber, metrics, maxChunksThisBatch, p.maxL1CommitCalldataSizePerBatch, p.maxL1CommitGasPerBatch, maxBlobSize)
return fmt.Errorf("the first chunk exceeds limits; start block number: %v, end block number: %v, limits: %+v, maxChunkNum: %v, maxL1CommitCalldataSize: %v, maxL1CommitGas: %v, maxBlobSize: %v, maxUncompressedBatchBytesSize: %v",
dbChunks[0].StartBlockNumber, dbChunks[0].EndBlockNumber, metrics, maxChunksThisBatch, p.maxL1CommitCalldataSizePerBatch, p.maxL1CommitGasPerBatch, maxBlobSize, p.maxUncompressedBatchBytesSize)
}

log.Debug("breaking limit condition in batching",
@@ -251,7 +253,9 @@ func (p *BatchProposer) proposeBatch() error {
"overEstimateL1CommitGas", totalOverEstimateL1CommitGas,
"maxL1CommitGas", p.maxL1CommitGasPerBatch,
"l1CommitBlobSize", metrics.L1CommitBlobSize,
"maxBlobSize", maxBlobSize)
"maxBlobSize", maxBlobSize,
"L1CommitUncompressedBatchBytesSize", metrics.L1CommitUncompressedBatchBytesSize,
"maxUncompressedBatchBytesSize", p.maxUncompressedBatchBytesSize)

batch.Chunks = batch.Chunks[:len(batch.Chunks)-1]
@@ -287,9 +287,9 @@ func testBatchProposerCodecv1Limits(t *testing.T) {

chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
assert.NoError(t, err)
assert.Equal(t, uint64(2084), chunks[0].TotalL1CommitGas)
assert.Equal(t, uint64(1124), chunks[0].TotalL1CommitGas)
assert.Equal(t, uint64(60), chunks[0].TotalL1CommitCalldataSize)
assert.Equal(t, uint64(2084), chunks[1].TotalL1CommitGas)
assert.Equal(t, uint64(1124), chunks[1].TotalL1CommitGas)
assert.Equal(t, uint64(60), chunks[1].TotalL1CommitCalldataSize)

bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
@@ -431,6 +431,7 @@ func testBatchProposerCodecv2Limits(t *testing.T) {
MaxRowConsumptionPerChunk: 1000000,
ChunkTimeoutSec: 300,
GasCostIncreaseMultiplier: 1.2,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, &params.ChainConfig{
BernoulliBlock: big.NewInt(0),
CurieBlock: big.NewInt(0),
@@ -441,9 +442,9 @@ func testBatchProposerCodecv2Limits(t *testing.T) {

chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
assert.NoError(t, err)
assert.Equal(t, uint64(2084), chunks[0].TotalL1CommitGas)
assert.Equal(t, uint64(1124), chunks[0].TotalL1CommitGas)
assert.Equal(t, uint64(60), chunks[0].TotalL1CommitCalldataSize)
assert.Equal(t, uint64(2084), chunks[1].TotalL1CommitGas)
assert.Equal(t, uint64(1124), chunks[1].TotalL1CommitGas)
assert.Equal(t, uint64(60), chunks[1].TotalL1CommitCalldataSize)

bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
@@ -451,6 +452,7 @@ func testBatchProposerCodecv2Limits(t *testing.T) {
MaxL1CommitCalldataSizePerBatch: tt.maxL1CommitCalldataSize,
BatchTimeoutSec: tt.batchTimeoutSec,
GasCostIncreaseMultiplier: 1.2,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, &params.ChainConfig{
BernoulliBlock: big.NewInt(0),
CurieBlock: big.NewInt(0),
@@ -604,9 +606,9 @@ func testBatchCommitGasAndCalldataSizeCodecv1Estimation(t *testing.T) {

chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
assert.NoError(t, err)
assert.Equal(t, uint64(2084), chunks[0].TotalL1CommitGas)
assert.Equal(t, uint64(1124), chunks[0].TotalL1CommitGas)
assert.Equal(t, uint64(60), chunks[0].TotalL1CommitCalldataSize)
assert.Equal(t, uint64(2084), chunks[1].TotalL1CommitGas)
assert.Equal(t, uint64(1124), chunks[1].TotalL1CommitGas)
assert.Equal(t, uint64(60), chunks[1].TotalL1CommitCalldataSize)

bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
@@ -634,7 +636,7 @@ func testBatchCommitGasAndCalldataSizeCodecv1Estimation(t *testing.T) {
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(chunk.ProvingStatus))
}

assert.Equal(t, uint64(161270), batches[0].TotalL1CommitGas)
assert.Equal(t, uint64(159350), batches[0].TotalL1CommitGas)
assert.Equal(t, uint64(120), batches[0].TotalL1CommitCalldataSize)
}

@@ -677,15 +679,16 @@ func testBatchCommitGasAndCalldataSizeCodecv2Estimation(t *testing.T) {
MaxRowConsumptionPerChunk: 1000000,
ChunkTimeoutSec: 300,
GasCostIncreaseMultiplier: 1.2,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)}, db, nil)
cp.TryProposeChunk() // chunk1 contains block1
cp.TryProposeChunk() // chunk2 contains block2

chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
assert.NoError(t, err)
assert.Equal(t, uint64(2084), chunks[0].TotalL1CommitGas)
assert.Equal(t, uint64(1124), chunks[0].TotalL1CommitGas)
assert.Equal(t, uint64(60), chunks[0].TotalL1CommitCalldataSize)
assert.Equal(t, uint64(2084), chunks[1].TotalL1CommitGas)
assert.Equal(t, uint64(1124), chunks[1].TotalL1CommitGas)
assert.Equal(t, uint64(60), chunks[1].TotalL1CommitCalldataSize)

bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
@@ -693,6 +696,7 @@ func testBatchCommitGasAndCalldataSizeCodecv2Estimation(t *testing.T) {
MaxL1CommitCalldataSizePerBatch: 1000000,
BatchTimeoutSec: 0,
GasCostIncreaseMultiplier: 1.2,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)}, db, nil)
bp.TryProposeBatch()

@@ -713,7 +717,7 @@ func testBatchCommitGasAndCalldataSizeCodecv2Estimation(t *testing.T) {
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(chunk.ProvingStatus))
}

assert.Equal(t, uint64(161270), batches[0].TotalL1CommitGas)
assert.Equal(t, uint64(159350), batches[0].TotalL1CommitGas)
assert.Equal(t, uint64(120), batches[0].TotalL1CommitCalldataSize)
}

@@ -760,6 +764,7 @@ func testBatchProposerBlobSizeLimit(t *testing.T) {
MaxRowConsumptionPerChunk: math.MaxUint64,
ChunkTimeoutSec: 0,
GasCostIncreaseMultiplier: 1,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, chainConfig, db, nil)

blockHeight := int64(0)
@@ -780,6 +785,7 @@ func testBatchProposerBlobSizeLimit(t *testing.T) {
MaxL1CommitCalldataSizePerBatch: math.MaxUint64,
BatchTimeoutSec: math.MaxUint64,
GasCostIncreaseMultiplier: 1,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, chainConfig, db, nil)

for i := 0; i < 30; i++ {
@@ -851,6 +857,7 @@ func testBatchProposerMaxChunkNumPerBatchLimit(t *testing.T) {
MaxRowConsumptionPerChunk: math.MaxUint64,
ChunkTimeoutSec: 0,
GasCostIncreaseMultiplier: 1,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, chainConfig, db, nil)

block = readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")
@@ -866,6 +873,7 @@ func testBatchProposerMaxChunkNumPerBatchLimit(t *testing.T) {
MaxL1CommitCalldataSizePerBatch: math.MaxUint64,
BatchTimeoutSec: math.MaxUint64,
GasCostIncreaseMultiplier: 1,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, chainConfig, db, nil)
bp.TryProposeBatch()
@@ -19,8 +19,6 @@ import (
"scroll-tech/rollup/internal/utils"
)

const maxBlobSize = uint64(131072)

// ChunkProposer proposes chunks based on available unchunked blocks.
type ChunkProposer struct {
ctx context.Context
@@ -36,6 +34,7 @@ type ChunkProposer struct {
maxRowConsumptionPerChunk uint64
chunkTimeoutSec uint64
gasCostIncreaseMultiplier float64
maxUncompressedBatchBytesSize uint64
forkHeights []uint64

chainCfg *params.ChainConfig
@@ -67,6 +66,7 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, chai
"maxRowConsumptionPerChunk", cfg.MaxRowConsumptionPerChunk,
"chunkTimeoutSec", cfg.ChunkTimeoutSec,
"gasCostIncreaseMultiplier", cfg.GasCostIncreaseMultiplier,
"maxUncompressedBatchBytesSize", cfg.MaxUncompressedBatchBytesSize,
"forkHeights", forkHeights)

p := &ChunkProposer{
@@ -81,6 +81,7 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, chai
maxRowConsumptionPerChunk: cfg.MaxRowConsumptionPerChunk,
chunkTimeoutSec: cfg.ChunkTimeoutSec,
gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
maxUncompressedBatchBytesSize: cfg.MaxUncompressedBatchBytesSize,
forkHeights: forkHeights,
chainCfg: chainCfg,
@@ -186,6 +187,7 @@ func (p *ChunkProposer) updateDBChunkInfo(chunk *encoding.Chunk, codecVersion en
}

func (p *ChunkProposer) proposeChunk() error {
// unchunkedBlockHeight >= 1, assuming genesis batch with chunk 0, block 0 is committed.
unchunkedBlockHeight, err := p.chunkOrm.GetUnchunkedBlockHeight(p.ctx)
if err != nil {
return err
@@ -216,6 +218,17 @@ func (p *ChunkProposer) proposeChunk() error {
codecVersion = encoding.CodecV2
}

// Including Curie block in a sole chunk.
if p.chainCfg.CurieBlock != nil && blocks[0].Header.Number.Cmp(p.chainCfg.CurieBlock) == 0 {
chunk := encoding.Chunk{Blocks: blocks[:1]}
metrics, calcErr := utils.CalculateChunkMetrics(&chunk, codecVersion)
if calcErr != nil {
return fmt.Errorf("failed to calculate chunk metrics: %w", calcErr)
}
p.recordTimerChunkMetrics(metrics)
return p.updateDBChunkInfo(&chunk, codecVersion, *metrics)
}

var chunk encoding.Chunk
for i, block := range blocks {
chunk.Blocks = append(chunk.Blocks, block)
@@ -232,11 +245,12 @@ func (p *ChunkProposer) proposeChunk() error {
metrics.L1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk ||
overEstimatedL1CommitGas > p.maxL1CommitGasPerChunk ||
metrics.CrcMax > p.maxRowConsumptionPerChunk ||
metrics.L1CommitBlobSize > maxBlobSize {
metrics.L1CommitBlobSize > maxBlobSize ||
metrics.L1CommitUncompressedBatchBytesSize > p.maxUncompressedBatchBytesSize {
if i == 0 {
// The first block exceeds hard limits, which indicates a bug in the sequencer, manual fix is needed.
return fmt.Errorf("the first block exceeds limits; block number: %v, limits: %+v, maxTxNum: %v, maxL1CommitCalldataSize: %v, maxL1CommitGas: %v, maxRowConsumption: %v, maxBlobSize: %v",
block.Header.Number, metrics, p.maxTxNumPerChunk, p.maxL1CommitCalldataSizePerChunk, p.maxL1CommitGasPerChunk, p.maxRowConsumptionPerChunk, maxBlobSize)
return fmt.Errorf("the first block exceeds limits; block number: %v, limits: %+v, maxTxNum: %v, maxL1CommitCalldataSize: %v, maxL1CommitGas: %v, maxRowConsumption: %v, maxBlobSize: %v, maxUncompressedBatchBytesSize: %v",
block.Header.Number, metrics, p.maxTxNumPerChunk, p.maxL1CommitCalldataSizePerChunk, p.maxL1CommitGasPerChunk, p.maxRowConsumptionPerChunk, maxBlobSize, p.maxUncompressedBatchBytesSize)
}

log.Debug("breaking limit condition in chunking",
@@ -250,7 +264,9 @@ func (p *ChunkProposer) proposeChunk() error {
"rowConsumption", metrics.CrcMax,
"maxRowConsumption", p.maxRowConsumptionPerChunk,
"l1CommitBlobSize", metrics.L1CommitBlobSize,
"maxBlobSize", maxBlobSize)
"maxBlobSize", maxBlobSize,
"L1CommitUncompressedBatchBytesSize", metrics.L1CommitUncompressedBatchBytesSize,
"maxUncompressedBatchBytesSize", p.maxUncompressedBatchBytesSize)

chunk.Blocks = chunk.Blocks[:len(chunk.Blocks)-1]
@@ -544,6 +544,7 @@ func testChunkProposerCodecv2Limits(t *testing.T) {
MaxRowConsumptionPerChunk: tt.maxRowConsumption,
ChunkTimeoutSec: tt.chunkTimeoutSec,
GasCostIncreaseMultiplier: 1.2,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), HomesteadBlock: tt.forkBlock}, db, nil)
cp.TryProposeChunk()

@@ -593,6 +594,7 @@ func testChunkProposerBlobSizeLimit(t *testing.T) {
MaxRowConsumptionPerChunk: math.MaxUint64,
ChunkTimeoutSec: math.MaxUint64,
GasCostIncreaseMultiplier: 1,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, chainConfig, db, nil)

for i := 0; i < 10; i++ {
@@ -624,3 +626,39 @@ func testChunkProposerBlobSizeLimit(t *testing.T) {
database.CloseDB(db)
}
}

func testChunkProposerIncludeCurieBlockInOneChunk(t *testing.T) {
db := setupDB(t)
block := readBlockFromJSON(t, "../../../testdata/blockTrace_02.json")
for i := int64(0); i < 10; i++ {
l2BlockOrm := orm.NewL2Block(db)
block.Header.Number = big.NewInt(i)
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block})
assert.NoError(t, err)
}

cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxBlockNumPerChunk: math.MaxUint64,
MaxTxNumPerChunk: math.MaxUint64,
MaxL1CommitGasPerChunk: math.MaxUint64,
MaxL1CommitCalldataSizePerChunk: math.MaxUint64,
MaxRowConsumptionPerChunk: math.MaxUint64,
ChunkTimeoutSec: math.MaxUint64,
GasCostIncreaseMultiplier: 1,
MaxUncompressedBatchBytesSize: math.MaxUint64,
}, &params.ChainConfig{BernoulliBlock: big.NewInt(1), CurieBlock: big.NewInt(2)}, db, nil)

for i := 0; i < 2; i++ {
cp.TryProposeChunk()
}

chunkOrm := orm.NewChunk(db)
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
assert.NoError(t, err)

assert.Len(t, chunks, 2)
for i, chunk := range chunks {
assert.Equal(t, uint64(i+1), chunk.EndBlockNumber)
}
database.CloseDB(db)
}
@@ -1,3 +1,5 @@
package watcher

const contractEventsBlocksFetchLimit = int64(10)

const maxBlobSize = uint64(131072)

@@ -109,6 +109,7 @@ func TestFunction(t *testing.T) {
t.Run("TestChunkProposerCodecv1Limits", testChunkProposerCodecv1Limits)
t.Run("TestChunkProposerCodecv2Limits", testChunkProposerCodecv2Limits)
t.Run("TestChunkProposerBlobSizeLimit", testChunkProposerBlobSizeLimit)
t.Run("TestChunkProposerIncludeCurieBlockInOneChunk", testChunkProposerIncludeCurieBlockInOneChunk)

// Run batch proposer test cases.
t.Run("TestBatchProposerCodecv0Limits", testBatchProposerCodecv0Limits)
@@ -83,6 +83,9 @@ type ChunkMetrics struct {
// codecv1 metrics, default 0 for codecv0
L1CommitBlobSize uint64

// codecv2 metrics, default 0 for codecv0 & codecv1
L1CommitUncompressedBatchBytesSize uint64

// timing metrics
EstimateGasTime time.Duration
EstimateCalldataSizeTime time.Duration
@@ -143,7 +146,7 @@ func CalculateChunkMetrics(chunk *encoding.Chunk, codecVersion encoding.CodecVer
metrics.EstimateCalldataSizeTime = time.Since(start)

start = time.Now()
metrics.L1CommitBlobSize, err = codecv2.EstimateChunkL1CommitBlobSize(chunk)
metrics.L1CommitUncompressedBatchBytesSize, metrics.L1CommitBlobSize, err = codecv2.EstimateChunkL1CommitBatchSizeAndBlobSize(chunk)
metrics.EstimateBlobSizeTime = time.Since(start)
if err != nil {
return nil, fmt.Errorf("failed to estimate codecv2 chunk L1 commit blob size: %w", err)
@@ -166,6 +169,9 @@ type BatchMetrics struct {
// codecv1 metrics, default 0 for codecv0
L1CommitBlobSize uint64

// codecv2 metrics, default 0 for codecv0 & codecv1
L1CommitUncompressedBatchBytesSize uint64

// timing metrics
EstimateGasTime time.Duration
EstimateCalldataSizeTime time.Duration
@@ -220,7 +226,7 @@ func CalculateBatchMetrics(batch *encoding.Batch, codecVersion encoding.CodecVer
metrics.EstimateCalldataSizeTime = time.Since(start)

start = time.Now()
metrics.L1CommitBlobSize, err = codecv2.EstimateBatchL1CommitBlobSize(batch)
metrics.L1CommitUncompressedBatchBytesSize, metrics.L1CommitBlobSize, err = codecv2.EstimateBatchL1CommitBatchSizeAndBlobSize(batch)
metrics.EstimateBlobSizeTime = time.Since(start)
if err != nil {
return nil, fmt.Errorf("failed to estimate codecv2 batch L1 commit blob size: %w", err)
@@ -82,14 +82,25 @@ contract MockBridge {
    /// point evaluation precompile
    uint256 constant BLS_MODULUS = 52435875175126190479447740508185965837690552500527637822603658699938581184513;

    uint256 public l1BaseFee;
    uint256 public l1BlobBaseFee;
    uint256 public l2BaseFee;
    uint256 public lastFinalizedBatchIndex;
    mapping(uint256 => bytes32) public committedBatches;
    mapping(uint256 => bytes32) public finalizedStateRoots;
    mapping(uint256 => bytes32) public withdrawRoots;

    function setL2BaseFee(uint256 _newL2BaseFee) external {
        l2BaseFee = _newL2BaseFee;
    function setL1BaseFee(uint256 _l1BaseFee) external {
        l1BaseFee = _l1BaseFee;
    }

    function setL1BaseFeeAndBlobBaseFee(uint256 _l1BaseFee, uint256 _l1BlobBaseFee) external {
        l1BaseFee = _l1BaseFee;
        l1BlobBaseFee = _l1BlobBaseFee;
    }

    function setL2BaseFee(uint256 _l2BaseFee) external {
        l2BaseFee = _l2BaseFee;
    }

    /*****************************

@@ -1,12 +1,11 @@
#!/bin/bash

# Download .so files
export LIBSCROLL_ZSTD_VERSION=v0.0.0-rc0-ubuntu20.04
export LIBSCROLL_ZSTD_VERSION=v0.1.0-rc0-ubuntu20.04
export SCROLL_LIB_PATH=/scroll/lib

sudo mkdir -p $SCROLL_LIB_PATH

sudo wget -O $SCROLL_LIB_PATH/libzktrie.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libzktrie.so
sudo wget -O $SCROLL_LIB_PATH/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libscroll_zstd.so

# Set the environment variable

@@ -1,196 +0,0 @@
package main

/*
#cgo LDFLAGS: -lm -ldl -lscroll_zstd
#include <stdint.h>
char* compress_scroll_batch_bytes(uint8_t* src, uint64_t src_size, uint8_t* output_buf, uint64_t *output_buf_size);
*/
import "C"

import (
	"context"
	"encoding/binary"
	"fmt"
	"os"
	"strconv"
	"unsafe"

	"github.com/scroll-tech/da-codec/encoding"
	"github.com/scroll-tech/da-codec/encoding/codecv1"
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/core/types"
	"github.com/scroll-tech/go-ethereum/log"

	"scroll-tech/common/database"
	"scroll-tech/rollup/internal/orm"
)

func main() {
	glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
	glogger.Verbosity(log.LvlInfo)
	log.Root().SetHandler(glogger)

	if len(os.Args) < 3 {
		log.Crit("Please provide start and end batch indices: ./script <start_index> <end_index>")
		return
	}

	startIndexStr := os.Args[1]
	endIndexStr := os.Args[2]

	startIndex, err := strconv.Atoi(startIndexStr)
	if err != nil || startIndex <= 0 {
		log.Crit("Invalid start batch index", "indexStr", startIndexStr, "err", err)
		return
	}

	endIndex, err := strconv.Atoi(endIndexStr)
	if err != nil || endIndex <= 0 {
		log.Crit("Invalid end batch index", "indexStr", endIndexStr, "err", err)
		return
	}

	if startIndex > endIndex {
		log.Crit("Start index must be less than or equal to end index")
		return
	}

	db, err := database.InitDB(&database.Config{
		DriverName: "postgres",
		DSN:        "postgres://postgres:scroll2022@localhost:7432/scroll",
		MaxOpenNum: 200,
		MaxIdleNum: 20,
	})
	if err != nil {
		log.Crit("failed to init db", "err", err)
	}
	defer func() {
		if deferErr := database.CloseDB(db); deferErr != nil {
			log.Error("failed to close db", "err", deferErr)
		}
	}()

	l2BlockOrm := orm.NewL2Block(db)
	chunkOrm := orm.NewChunk(db)
	batchOrm := orm.NewBatch(db)

	totalRawSize := uint64(0)
	totalCompressedSize := uint64(0)

	for i := startIndex; i <= endIndex; i++ {
		batchIndex := uint64(i)
		dbBatch, err := batchOrm.GetBatchByIndex(context.Background(), batchIndex)
		if err != nil {
			log.Crit("failed to get batch", "index", batchIndex, "err", err)
		}

		dbParentBatch, err := batchOrm.GetBatchByIndex(context.Background(), batchIndex-1)
		if err != nil {
			log.Crit("failed to get parent batch", "index", batchIndex-1, "err", err)
		}

		dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), dbBatch.StartChunkIndex, dbBatch.EndChunkIndex)
		if err != nil {
			log.Crit("failed to fetch chunks", "err", err)
		}

		chunks := make([]*encoding.Chunk, len(dbChunks))
		for i, c := range dbChunks {
			blocks, err := l2BlockOrm.GetL2BlocksInRange(context.Background(), c.StartBlockNumber, c.EndBlockNumber)
			if err != nil {
				log.Crit("failed to fetch blocks", "err", err)
			}
			chunks[i] = &encoding.Chunk{Blocks: blocks}
		}

		batch := &encoding.Batch{
			Index:                      dbBatch.Index,
			TotalL1MessagePoppedBefore: dbChunks[0].TotalL1MessagesPoppedBefore,
			ParentBatchHash:            common.HexToHash(dbParentBatch.Hash),
			Chunks:                     chunks,
		}

		raw, compressed, blobSize, err := estimateBatchL1CommitBlobSize(batch)
		if err != nil {
			log.Crit("failed to estimate batch l1 commit blob size", "err", err)
		}

		// compression_ratio = preimage_bytes / compressed_bytes
		log.Info("compression ratio", "batch index", batch.Index, "raw length", raw, "compressed length", compressed, "blobSize", blobSize, "ratio", float64(raw)/float64(compressed))

		totalRawSize += raw
		totalCompressedSize += compressed
	}

	batchCount := endIndex - startIndex + 1
	averageRawSize := float64(totalRawSize) / float64(batchCount)
	averageCompressedSize := float64(totalCompressedSize) / float64(batchCount)

	log.Info("Average compression ratio", "average raw length", averageRawSize, "average compressed length", averageCompressedSize, "ratio", averageRawSize/averageCompressedSize)
}

func estimateBatchL1CommitBlobSize(b *encoding.Batch) (uint64, uint64, uint64, error) {
	batchBytes, err := constructBatchPayload(b.Chunks)
	if err != nil {
		return 0, 0, 0, err
	}
	blobBytes, err := compressScrollBatchBytes(batchBytes)
	if err != nil {
		return 0, 0, 0, err
	}
	return uint64(len(batchBytes)), uint64(len(blobBytes)), codecv1.CalculatePaddedBlobSize(uint64(len(blobBytes))), nil
}

// constructBatchPayload constructs the batch payload.
// This function is only used in compressed batch payload length estimation.
func constructBatchPayload(chunks []*encoding.Chunk) ([]byte, error) {
	// metadata consists of num_chunks (2 bytes) and chunki_size (4 bytes per chunk)
	metadataLength := 2 + 45*4

	// the raw (un-compressed and un-padded) blob payload
	batchBytes := make([]byte, metadataLength)

	// batch metadata: num_chunks
	binary.BigEndian.PutUint16(batchBytes[0:], uint16(len(chunks)))

	// encode batch metadata and L2 transactions,
	for chunkID, chunk := range chunks {
		currentChunkStartIndex := len(batchBytes)

		for _, block := range chunk.Blocks {
			for _, tx := range block.Transactions {
				if tx.Type == types.L1MessageTxType {
					continue
				}

				// encode L2 txs into batch payload
				rlpTxData, err := encoding.ConvertTxDataToRLPEncoding(tx, false /* no mock */)
				if err != nil {
					return nil, err
				}
				batchBytes = append(batchBytes, rlpTxData...)
			}
		}

		// batch metadata: chunki_size
		if chunkSize := len(batchBytes) - currentChunkStartIndex; chunkSize != 0 {
			binary.BigEndian.PutUint32(batchBytes[2+4*chunkID:], uint32(chunkSize))
		}
	}
	return batchBytes, nil
}

// compressScrollBatchBytes compresses the given batch of bytes.
// The output buffer is allocated with an extra 128 bytes to accommodate metadata overhead or error message.
func compressScrollBatchBytes(batchBytes []byte) ([]byte, error) {
	srcSize := C.uint64_t(len(batchBytes))
	outbufSize := C.uint64_t(len(batchBytes) + 128) // Allocate output buffer with extra 128 bytes
	outbuf := make([]byte, outbufSize)

	if err := C.compress_scroll_batch_bytes((*C.uchar)(unsafe.Pointer(&batchBytes[0])), srcSize,
		(*C.uchar)(unsafe.Pointer(&outbuf[0])), &outbufSize); err != nil {
		return nil, fmt.Errorf("failed to compress scroll batch bytes: %s", C.GoString(err))
	}

	return outbuf[:int(outbufSize)], nil
}
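A note on the magic number in `constructBatchPayload` above: the fixed metadata region is 2 + 45×4 = 182 bytes, a 2-byte `num_chunks` field plus one 4-byte size slot per chunk, with 45 being the per-batch chunk cap the deleted script assumes. The same arithmetic as named constants (illustrative only, not repository code):

```go
package main

// Illustrative breakdown of the hard-coded 2 + 45*4 above.
const (
	numChunksFieldBytes = 2  // uint16 chunk count
	chunkSizeFieldBytes = 4  // uint32 payload length per chunk slot
	maxNumChunks        = 45 // per-batch chunk cap assumed by the script
	metadataLength      = numChunksFieldBytes + maxNumChunks*chunkSizeFieldBytes // 182 bytes
)

func main() { println(metadataLength) } // 182
```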
@@ -40,8 +40,8 @@ var (
	l1Client *ethclient.Client
	l2Client *ethclient.Client

	// l1Auth
	l1Auth *bind.TransactOpts
	l2Auth *bind.TransactOpts
)

func setupDB(t *testing.T) *gorm.DB {
@@ -82,6 +82,7 @@ func setupEnv(t *testing.T) {
	var (
		err           error
		l1GethChainID *big.Int
		l2GethChainID *big.Int
	)

	testApps = tc.NewTestcontainerApps()
@@ -96,6 +97,8 @@ func setupEnv(t *testing.T) {
	assert.NoError(t, err)
	l1GethChainID, err = l1Client.ChainID(context.Background())
	assert.NoError(t, err)
	l2GethChainID, err = l2Client.ChainID(context.Background())
	assert.NoError(t, err)

	l1Cfg, l2Cfg := rollupApp.Config.L1Config, rollupApp.Config.L2Config
	l1Cfg.Confirmations = 0
@@ -103,17 +106,16 @@ func setupEnv(t *testing.T) {
	l2Cfg.Confirmations = 0
	l2Cfg.RelayerConfig.SenderConfig.Confirmations = 0

	l1Auth, err = bind.NewKeyedTransactorWithChainID(rollupApp.Config.L2Config.RelayerConfig.CommitSenderPrivateKey, l1GethChainID)
	l1Auth, err = bind.NewKeyedTransactorWithChainID(l2Cfg.RelayerConfig.CommitSenderPrivateKey, l1GethChainID)
	assert.NoError(t, err)
	rollupApp.Config.L1Config.Endpoint, err = testApps.GetPoSL1EndPoint()
	assert.NoError(t, err)
	rollupApp.Config.L2Config.RelayerConfig.SenderConfig.Endpoint, err = testApps.GetPoSL1EndPoint()

	l2Auth, err = bind.NewKeyedTransactorWithChainID(l1Cfg.RelayerConfig.GasOracleSenderPrivateKey, l2GethChainID)
	assert.NoError(t, err)

	port, err := rand.Int(rand.Reader, big.NewInt(10000))
	assert.NoError(t, err)
	svrPort := strconv.FormatInt(port.Int64()+40000, 10)
	rollupApp.Config.L2Config.RelayerConfig.ChainMonitor.BaseURL = "http://localhost:" + svrPort
	l2Cfg.RelayerConfig.ChainMonitor.BaseURL = "http://localhost:" + svrPort
}

func mockChainMonitorServer(baseURL string) (*http.Server, error) {

@@ -137,7 +139,7 @@ func prepareContracts(t *testing.T) {
	// L1 ScrollChain contract
	nonce, err := l1Client.PendingNonceAt(context.Background(), l1Auth.From)
	assert.NoError(t, err)
	scrollChainAddress := crypto.CreateAddress(l1Auth.From, nonce)
	mockL1ContractAddress := crypto.CreateAddress(l1Auth.From, nonce)
	tx := types.NewContractCreation(nonce, big.NewInt(0), 10000000, big.NewInt(1000000000), common.FromHex(mock_bridge.MockBridgeMetaData.Bin))
	signedTx, err := l1Auth.Signer(l1Auth.From, tx)
	assert.NoError(t, err)
@@ -145,23 +147,51 @@ func prepareContracts(t *testing.T) {
	assert.NoError(t, err)

	assert.Eventually(t, func() bool {
		_, isPending, err := l1Client.TransactionByHash(context.Background(), signedTx.Hash())
		_, isPending, getErr := l1Client.TransactionByHash(context.Background(), signedTx.Hash())
		return getErr == nil && !isPending
	}, 30*time.Second, time.Second)

	assert.Eventually(t, func() bool {
		receipt, getErr := l1Client.TransactionReceipt(context.Background(), signedTx.Hash())
		return getErr == nil && receipt.Status == gethTypes.ReceiptStatusSuccessful
	}, 30*time.Second, time.Second)

	assert.Eventually(t, func() bool {
		code, getErr := l1Client.CodeAt(context.Background(), mockL1ContractAddress, nil)
		return getErr == nil && len(code) > 0
	}, 30*time.Second, time.Second)

	// L2 ScrollChain contract
	nonce, err = l2Client.PendingNonceAt(context.Background(), l2Auth.From)
	assert.NoError(t, err)
	mockL2ContractAddress := crypto.CreateAddress(l2Auth.From, nonce)
	tx = types.NewContractCreation(nonce, big.NewInt(0), 2000000, big.NewInt(1000000000), common.FromHex(mock_bridge.MockBridgeMetaData.Bin))
	signedTx, err = l2Auth.Signer(l2Auth.From, tx)
	assert.NoError(t, err)
	err = l2Client.SendTransaction(context.Background(), signedTx)
	assert.NoError(t, err)

	assert.Eventually(t, func() bool {
		_, isPending, err := l2Client.TransactionByHash(context.Background(), signedTx.Hash())
		return err == nil && !isPending
	}, 30*time.Second, time.Second)

	assert.Eventually(t, func() bool {
		receipt, err := l1Client.TransactionReceipt(context.Background(), signedTx.Hash())
		receipt, err := l2Client.TransactionReceipt(context.Background(), signedTx.Hash())
		return err == nil && receipt.Status == gethTypes.ReceiptStatusSuccessful
	}, 30*time.Second, time.Second)

	assert.Eventually(t, func() bool {
		code, err := l1Client.CodeAt(context.Background(), scrollChainAddress, nil)
		code, err := l2Client.CodeAt(context.Background(), mockL2ContractAddress, nil)
		return err == nil && len(code) > 0
	}, 30*time.Second, time.Second)

	l1Config, l2Config := rollupApp.Config.L1Config, rollupApp.Config.L2Config
	l1Config.ScrollChainContractAddress = scrollChainAddress
	l2Config.RelayerConfig.RollupContractAddress = scrollChainAddress
	l1Config.ScrollChainContractAddress = mockL1ContractAddress
	l2Config.RelayerConfig.RollupContractAddress = mockL1ContractAddress

	l2Config.RelayerConfig.GasPriceOracleContractAddress = mockL1ContractAddress
	l1Config.RelayerConfig.GasPriceOracleContractAddress = mockL2ContractAddress
}

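The strengthened checks above wait for three separately observable stages before a freshly deployed contract is used: the transaction leaves the pending pool, its receipt reports success, and bytecode is visible at the create address. A condensed sketch of that pattern, reusing the same packages the test file imports; `waitDeployed` is a hypothetical helper, not part of the repository:

```go
import (
	"context"
	"testing"
	"time"

	"github.com/scroll-tech/go-ethereum/common"
	gethTypes "github.com/scroll-tech/go-ethereum/core/types"
	"github.com/scroll-tech/go-ethereum/ethclient"
	"github.com/stretchr/testify/assert"
)

// waitDeployed blocks until txHash is mined successfully and code exists at addr.
func waitDeployed(t *testing.T, client *ethclient.Client, txHash common.Hash, addr common.Address) {
	assert.Eventually(t, func() bool {
		_, isPending, err := client.TransactionByHash(context.Background(), txHash)
		return err == nil && !isPending
	}, 30*time.Second, time.Second)
	assert.Eventually(t, func() bool {
		receipt, err := client.TransactionReceipt(context.Background(), txHash)
		return err == nil && receipt.Status == gethTypes.ReceiptStatusSuccessful
	}, 30*time.Second, time.Second)
	assert.Eventually(t, func() bool {
		code, err := client.CodeAt(context.Background(), addr, nil)
		return err == nil && len(code) > 0
	}, 30*time.Second, time.Second)
}
```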
func TestFunction(t *testing.T) {

@@ -183,5 +213,6 @@ func TestFunction(t *testing.T) {

	// l1/l2 gas oracle
	t.Run("TestImportL1GasPrice", testImportL1GasPrice)
	t.Run("TestImportL1GasPriceAfterCurie", testImportL1GasPriceAfterCurie)
	t.Run("TestImportL2GasPrice", testImportL2GasPrice)
}

@@ -65,6 +65,51 @@ func testImportL1GasPrice(t *testing.T) {
	assert.Equal(t, types.GasOracleStatus(blocks[0].GasOracleStatus), types.GasOracleImporting)
}

func testImportL1GasPriceAfterCurie(t *testing.T) {
	db := setupDB(t)
	defer database.CloseDB(db)

	prepareContracts(t)

	l1Cfg := rollupApp.Config.L1Config

	// Create L1Relayer
	l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig, &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)}, relayer.ServiceTypeL1GasOracle, nil)
	assert.NoError(t, err)
	defer l1Relayer.StopSenders()

	// Create L1Watcher
	startHeight, err := l1Client.BlockNumber(context.Background())
	assert.NoError(t, err)
	l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, startHeight-1, 0, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db, nil)

	// fetch new blocks
	number, err := l1Client.BlockNumber(context.Background())
	assert.Greater(t, number, startHeight-1)
	assert.NoError(t, err)
	err = l1Watcher.FetchBlockHeader(number)
	assert.NoError(t, err)

	l1BlockOrm := orm.NewL1Block(db)
	// check db status
	latestBlockHeight, err := l1BlockOrm.GetLatestL1BlockHeight(context.Background())
	assert.NoError(t, err)
	assert.Equal(t, number, latestBlockHeight)
	blocks, err := l1BlockOrm.GetL1Blocks(context.Background(), map[string]interface{}{"number": latestBlockHeight})
	assert.NoError(t, err)
	assert.Equal(t, len(blocks), 1)
	assert.Empty(t, blocks[0].OracleTxHash)
	assert.Equal(t, types.GasOracleStatus(blocks[0].GasOracleStatus), types.GasOraclePending)

	// relay gas price
	l1Relayer.ProcessGasPriceOracle()
	blocks, err = l1BlockOrm.GetL1Blocks(context.Background(), map[string]interface{}{"number": latestBlockHeight})
	assert.NoError(t, err)
	assert.Equal(t, len(blocks), 1)
	assert.NotEmpty(t, blocks[0].OracleTxHash)
	assert.Equal(t, types.GasOracleStatus(blocks[0].GasOracleStatus), types.GasOracleImporting)
}

func testImportL2GasPrice(t *testing.T) {
	db := setupDB(t)
	defer database.CloseDB(db)

@@ -2,6 +2,7 @@ package tests

import (
	"context"
	"math"
	"math/big"
	"testing"
	"time"
@@ -229,12 +230,14 @@ func testCommitBatchAndFinalizeBatch4844(t *testing.T) {
		MaxL1CommitCalldataSizePerChunk: 100000,
		MaxRowConsumptionPerChunk:       1048319,
		ChunkTimeoutSec:                 300,
		MaxUncompressedBatchBytesSize:   math.MaxUint64,
	}, chainConfig, db, nil)

	bp := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{
		MaxL1CommitGasPerBatch:          1,
		MaxL1CommitCalldataSizePerBatch: 100000,
		BatchTimeoutSec:                 300,
		MaxUncompressedBatchBytesSize:   math.MaxUint64,
	}, chainConfig, db, nil)

	cp.TryProposeChunk()

@@ -366,12 +369,14 @@ func testCommitBatchAndFinalizeBatchBeforeAndAfter4844(t *testing.T) {
		MaxL1CommitCalldataSizePerChunk: 1000000,
		MaxRowConsumptionPerChunk:       1048319,
		ChunkTimeoutSec:                 300,
		MaxUncompressedBatchBytesSize:   math.MaxUint64,
	}, chainConfig, db, nil)

	bp := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{
		MaxL1CommitGasPerBatch:          50000000000,
		MaxL1CommitCalldataSizePerBatch: 1000000,
		BatchTimeoutSec:                 300,
		MaxUncompressedBatchBytesSize:   math.MaxUint64,
	}, chainConfig, db, nil)

	cp.TryProposeChunk()

@@ -493,12 +498,14 @@ func testCommitBatchAndFinalizeBatchBeforeAndAfterCompression(t *testing.T) {
		MaxL1CommitCalldataSizePerChunk: 1000000,
		MaxRowConsumptionPerChunk:       1048319,
		ChunkTimeoutSec:                 300,
		MaxUncompressedBatchBytesSize:   math.MaxUint64,
	}, chainConfig, db, nil)

	bp := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{
		MaxL1CommitGasPerBatch:          50000000000,
		MaxL1CommitCalldataSizePerBatch: 1000000,
		BatchTimeoutSec:                 300,
		MaxUncompressedBatchBytesSize:   math.MaxUint64,
	}, chainConfig, db, nil)

	cp.TryProposeChunk()

@@ -177,7 +177,7 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
	if len(dbTX) > 0 && dbTX[0] != nil {
		db = dbTX[0]
	}
	db.WithContext(ctx)
	db = db.WithContext(ctx)
	db = db.Model(&Batch{})

	if err := db.Create(&newBatch).Error; err != nil {

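The one-line fix above addresses a common GORM pitfall: `WithContext`, like other `*gorm.DB` chain methods, returns a derived session rather than mutating its receiver, so the old call silently discarded the context. A minimal sketch of the corrected pattern; `insertWithCtx` is illustrative, not the repository's code:

```go
package orm

import (
	"context"

	"gorm.io/gorm"
)

// insertWithCtx shows why the reassignment matters: the derived session
// returned by WithContext must be kept, or later queries run without ctx.
func insertWithCtx(ctx context.Context, db *gorm.DB, record interface{}) error {
	db = db.WithContext(ctx) // reassign; calling WithContext alone has no effect
	return db.Create(record).Error
}
```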