Mirror of https://github.com/scroll-tech/scroll.git (synced 2026-01-13 07:57:58 -05:00)

Compare commits: refactor/p...curie-test (69 commits)
| SHA1 |
|---|
| d8be3f95a9 |
| 30653f2d6c |
| 8f7548e0bc |
| 3be39fa807 |
| 37f875924d |
| 07f1691c92 |
| a3879f30ef |
| 060cb2570d |
| 8b83d28963 |
| c0a6d2b304 |
| c36fcc958e |
| ea9c9f0c07 |
| 4cfe815b1d |
| 0f7e74ff70 |
| 224089a625 |
| 657a252ab4 |
| 39a9c1f80b |
| 14d2f0c06c |
| 90d87739f7 |
| 4dd086e481 |
| bda2219631 |
| c773457ece |
| 84aedfcee7 |
| 03041420ec |
| e417322635 |
| e657d55498 |
| b215558a42 |
| 9dca413294 |
| 2ba544a6f2 |
| 667e906c64 |
| a84c18e79f |
| 900e161cfc |
| 16691a12a3 |
| 191078b6c5 |
| c9e82b5eea |
| 1dda44d8bf |
| 4fd4adf330 |
| f023e148f9 |
| 7d5f90bfe5 |
| 9ec78b4b99 |
| 3ebc4cb2b6 |
| b9e9457577 |
| aa01ea96fa |
| 4975a078fa |
| cec4467dcd |
| 43a72bc352 |
| daedc98b29 |
| 80bb47de64 |
| 5792d950c0 |
| f430a8e98b |
| 2a066bac2a |
| 56db1a8de1 |
| 072e7ec77f |
| f4d26e955d |
| 221cec132d |
| 7a1fe906bc |
| 8be3a6d3b5 |
| 785d2190d0 |
| 78e5b50b9a |
| c3909d1dd2 |
| 7cc7de4b56 |
| bd9e999292 |
| cf83df64a0 |
| c580cd5c95 |
| 3eb08b7ad4 |
| 5a6d5bf267 |
| dd2ee77c0f |
| 4240be9dec |
| a93a23db21 |
.github/workflows/docker.yml (vendored): 6 changed lines

@@ -46,7 +46,7 @@ jobs:
       with:
         context: .
         file: ./build/dockerfiles/event_watcher.Dockerfile
-        platforms: linux/amd64,linux/arm64
+        platforms: linux/amd64
         push: true
         tags: |
           ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -91,7 +91,7 @@ jobs:
       with:
         context: .
         file: ./build/dockerfiles/gas_oracle.Dockerfile
-        platforms: linux/amd64,linux/arm64
+        platforms: linux/amd64
         push: true
         tags: |
           ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
@@ -136,7 +136,7 @@ jobs:
       with:
         context: .
         file: ./build/dockerfiles/rollup_relayer.Dockerfile
-        platforms: linux/amd64,linux/arm64
+        platforms: linux/amd64
         push: true
         tags: |
           ${{ secrets.DOCKERHUB_USERNAME }}/${{ env.REPOSITORY }}:${{ env.IMAGE_TAG }}
.github/workflows/rollup.yml (vendored): 2 changed lines

@@ -105,7 +105,7 @@ jobs:
       - name: Test rollup packages
         working-directory: 'rollup'
         run: |
-          make test
+          ./run_test.sh
       - name: Upload coverage reports to Codecov
         uses: codecov/codecov-action@v3
         env:
.gitignore (vendored): 2 changed lines

@@ -20,5 +20,3 @@ coverage.txt
 # misc
 sftp-config.json
 *~
-
-target
README.md: 12 changed lines

@@ -46,7 +46,19 @@ make dev_docker
 Run the tests using the following commands:
 
 ```bash
+export LIBSCROLL_ZSTD_VERSION=v0.0.0-rc0-ubuntu20.04
+export SCROLL_LIB_PATH=/scroll/lib
+
+sudo mkdir -p $SCROLL_LIB_PATH
+
+sudo wget -O $SCROLL_LIB_PATH/libzktrie.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libzktrie.so
+sudo wget -O $SCROLL_LIB_PATH/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libscroll_zstd.so
+
+export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$SCROLL_LIB_PATH
+export CGO_LDFLAGS="-L$SCROLL_LIB_PATH -Wl,-rpath,$SCROLL_LIB_PATH"
+
 go test -v -race -covermode=atomic scroll-tech/rollup/...
+
 go test -tags="mock_verifier" -v -race -covermode=atomic scroll-tech/coordinator/...
 go test -v -race -covermode=atomic scroll-tech/database/...
 go test -v -race -covermode=atomic scroll-tech/common/...
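For context on why these exports matter: the rollup packages link the downloaded shared libraries through cgo, so the linker needs `-L` at build time and the loader needs an rpath or `LD_LIBRARY_PATH` at run time. A hypothetical cgo stub (not a file in this repo) showing where the flags land:

```go
// Hypothetical package, for illustration only. `go build` appends
// $CGO_LDFLAGS to the C link line, so -L$SCROLL_LIB_PATH resolves
// libscroll_zstd.so when linking, while -Wl,-rpath (or LD_LIBRARY_PATH)
// lets the dynamic loader find it when the tests actually run.
package zstdlink

/*
#cgo LDFLAGS: -lscroll_zstd -lzktrie
*/
import "C"
```

If the libraries are missing or the paths are wrong, the failure shows up at link time (`cannot find -lscroll_zstd`) or at test startup (loader error), which is why the README sets both variables before `go test`.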
build/dockerfiles/event_watcher.Dockerfile:

@@ -1,3 +1,6 @@
+ARG LIBSCROLL_ZSTD_VERSION=v0.0.0-rc0-ubuntu20.04
+ARG SCROLL_LIB_PATH=/scroll/lib
+
 # Download Go dependencies
 FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
 
@@ -15,14 +18,38 @@ RUN go mod download -x
 # Build event_watcher
 FROM base as builder
 
+ARG LIBSCROLL_ZSTD_VERSION
+ARG SCROLL_LIB_PATH
+
+RUN mkdir -p $SCROLL_LIB_PATH
+
+RUN apt-get -qq update && apt-get -qq install -y wget
+
+RUN wget -O $SCROLL_LIB_PATH/libzktrie.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libzktrie.so
+RUN wget -O $SCROLL_LIB_PATH/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libscroll_zstd.so
+
+ENV LD_LIBRARY_PATH=$SCROLL_LIB_PATH
+ENV CGO_LDFLAGS="-L$SCROLL_LIB_PATH -Wl,-rpath,$SCROLL_LIB_PATH"
+
 RUN --mount=target=. \
     --mount=type=cache,target=/root/.cache/go-build \
-    cd /src/rollup/cmd/event_watcher/ && CGO_LDFLAGS="-ldl" go build -v -p 4 -o /bin/event_watcher
+    cd /src/rollup/cmd/event_watcher/ && go build -v -p 4 -o /bin/event_watcher
 
 # Pull event_watcher into a second stage deploy alpine container
 FROM ubuntu:20.04
 
-ENV CGO_LDFLAGS="-ldl"
+ARG LIBSCROLL_ZSTD_VERSION
+ARG SCROLL_LIB_PATH
+
+RUN mkdir -p $SCROLL_LIB_PATH
+
+RUN apt-get -qq update && apt-get -qq install -y wget
+
+RUN wget -O $SCROLL_LIB_PATH/libzktrie.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libzktrie.so
+RUN wget -O $SCROLL_LIB_PATH/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libscroll_zstd.so
+
+ENV LD_LIBRARY_PATH=$SCROLL_LIB_PATH
+ENV CGO_LDFLAGS="-L$SCROLL_LIB_PATH -Wl,-rpath,$SCROLL_LIB_PATH"
+
 COPY --from=builder /bin/event_watcher /bin/
 WORKDIR /app
build/dockerfiles/gas_oracle.Dockerfile:

@@ -1,3 +1,6 @@
+ARG LIBSCROLL_ZSTD_VERSION=v0.0.0-rc0-ubuntu20.04
+ARG SCROLL_LIB_PATH=/scroll/lib
+
 # Download Go dependencies
 FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
 
@@ -15,14 +18,38 @@ RUN go mod download -x
 # Build gas_oracle
 FROM base as builder
 
+ARG LIBSCROLL_ZSTD_VERSION
+ARG SCROLL_LIB_PATH
+
+RUN mkdir -p $SCROLL_LIB_PATH
+
+RUN apt-get -qq update && apt-get -qq install -y wget
+
+RUN wget -O $SCROLL_LIB_PATH/libzktrie.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libzktrie.so
+RUN wget -O $SCROLL_LIB_PATH/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libscroll_zstd.so
+
+ENV LD_LIBRARY_PATH=$SCROLL_LIB_PATH
+ENV CGO_LDFLAGS="-L$SCROLL_LIB_PATH -Wl,-rpath,$SCROLL_LIB_PATH"
+
 RUN --mount=target=. \
     --mount=type=cache,target=/root/.cache/go-build \
-    cd /src/rollup/cmd/gas_oracle/ && CGO_LDFLAGS="-ldl" go build -v -p 4 -o /bin/gas_oracle
+    cd /src/rollup/cmd/gas_oracle/ && go build -v -p 4 -o /bin/gas_oracle
 
 # Pull gas_oracle into a second stage deploy alpine container
 FROM ubuntu:20.04
 
-ENV CGO_LDFLAGS="-ldl"
+ARG LIBSCROLL_ZSTD_VERSION
+ARG SCROLL_LIB_PATH
+
+RUN mkdir -p $SCROLL_LIB_PATH
+
+RUN apt-get -qq update && apt-get -qq install -y wget
+
+RUN wget -O $SCROLL_LIB_PATH/libzktrie.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libzktrie.so
+RUN wget -O $SCROLL_LIB_PATH/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libscroll_zstd.so
+
+ENV LD_LIBRARY_PATH=$SCROLL_LIB_PATH
+ENV CGO_LDFLAGS="-L$SCROLL_LIB_PATH -Wl,-rpath,$SCROLL_LIB_PATH"
+
 COPY --from=builder /bin/gas_oracle /bin/
 WORKDIR /app
build/dockerfiles/rollup_relayer.Dockerfile:

@@ -1,3 +1,6 @@
+ARG LIBSCROLL_ZSTD_VERSION=v0.0.0-rc0-ubuntu20.04
+ARG SCROLL_LIB_PATH=/scroll/lib
+
 # Download Go dependencies
 FROM scrolltech/go-rust-builder:go-1.21-rust-nightly-2023-12-03 as base
 
@@ -15,14 +18,38 @@ RUN go mod download -x
 # Build rollup_relayer
 FROM base as builder
 
+ARG LIBSCROLL_ZSTD_VERSION
+ARG SCROLL_LIB_PATH
+
+RUN mkdir -p $SCROLL_LIB_PATH
+
+RUN apt-get -qq update && apt-get -qq install -y wget
+
+RUN wget -O $SCROLL_LIB_PATH/libzktrie.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libzktrie.so
+RUN wget -O $SCROLL_LIB_PATH/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libscroll_zstd.so
+
+ENV LD_LIBRARY_PATH=$SCROLL_LIB_PATH
+ENV CGO_LDFLAGS="-L$SCROLL_LIB_PATH -Wl,-rpath,$SCROLL_LIB_PATH"
+
 RUN --mount=target=. \
     --mount=type=cache,target=/root/.cache/go-build \
-    cd /src/rollup/cmd/rollup_relayer/ && CGO_LDFLAGS="-ldl" go build -v -p 4 -o /bin/rollup_relayer
+    cd /src/rollup/cmd/rollup_relayer/ && go build -v -p 4 -o /bin/rollup_relayer
 
 # Pull rollup_relayer into a second stage deploy alpine container
 FROM ubuntu:20.04
 
-ENV CGO_LDFLAGS="-ldl"
+ARG LIBSCROLL_ZSTD_VERSION
+ARG SCROLL_LIB_PATH
+
+RUN mkdir -p $SCROLL_LIB_PATH
+
+RUN apt-get -qq update && apt-get -qq install -y wget
+
+RUN wget -O $SCROLL_LIB_PATH/libzktrie.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libzktrie.so
+RUN wget -O $SCROLL_LIB_PATH/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libscroll_zstd.so
+
+ENV LD_LIBRARY_PATH=$SCROLL_LIB_PATH
+ENV CGO_LDFLAGS="-L$SCROLL_LIB_PATH -Wl,-rpath,$SCROLL_LIB_PATH"
+
 COPY --from=builder /bin/rollup_relayer /bin/
 WORKDIR /app
@@ -183,12 +183,6 @@ type ChunkInfo struct {
 	TxBytes []byte `json:"tx_bytes"`
 }
 
-// SubCircuitRowUsage tracing info added in v0.11.0rc8
-type SubCircuitRowUsage struct {
-	Name      string `json:"name"`
-	RowNumber uint64 `json:"row_number"`
-}
-
 // ChunkProof includes the proof info that are required for chunk verification and rollup.
 type ChunkProof struct {
 	StorageTrace []byte `json:"storage_trace,omitempty"`
@@ -197,9 +191,8 @@ type ChunkProof struct {
 	Instances []byte `json:"instances"`
 	Vk        []byte `json:"vk"`
 	// cross-reference between cooridinator computation and prover compution
-	ChunkInfo  *ChunkInfo           `json:"chunk_info,omitempty"`
-	GitVersion string               `json:"git_version,omitempty"`
-	RowUsages  []SubCircuitRowUsage `json:"row_usages,omitempty"`
+	ChunkInfo  *ChunkInfo `json:"chunk_info,omitempty"`
+	GitVersion string     `json:"git_version,omitempty"`
 }
 
 // BatchProof includes the proof info that are required for batch verification and rollup.
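A side note on the compatibility of the `ChunkProof` change above: because Go's `encoding/json` ignores unknown fields, JSON produced by a branch that still emits `row_usages` decodes cleanly into the trimmed struct. A minimal sketch (field set reduced for illustration; not the full repository type):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Trimmed-down mirror of the ChunkProof in this diff.
type ChunkProof struct {
	Instances  []byte `json:"instances"`
	Vk         []byte `json:"vk"`
	GitVersion string `json:"git_version,omitempty"`
}

func main() {
	// JSON emitted by the branch that still carries row_usages.
	old := []byte(`{"instances":"AQ==","vk":"Ag==","git_version":"v4.4.14","row_usages":[{"name":"evm","row_number":42}]}`)
	var p ChunkProof
	if err := json.Unmarshal(old, &p); err != nil {
		panic(err)
	}
	fmt.Println(p.GitVersion) // row_usages is silently dropped
}
```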
@@ -108,7 +108,7 @@ func TestProofDetailHash(t *testing.T) {
 	}
 	hash, err := proofDetail.Hash()
 	assert.NoError(t, err)
-	expectedHash := "01128ea9006601146ba80dbda959c96ebaefca463e78570e473a57d821db5ec1"
+	expectedHash := "d3b57cb84b0da8043373eeb3612806fb7248d6d1b6e089846ccf3ccce2d9f31c"
 	assert.Equal(t, expectedHash, hex.EncodeToString(hash))
 }
@@ -5,7 +5,7 @@ import (
 	"runtime/debug"
 )
 
-var tag = "v4.4.14"
+var tag = "v4.4.8"
 
 var commit = func() string {
 	if info, ok := debug.ReadBuildInfo(); ok {
@@ -882,6 +882,17 @@ error ErrorIncorrectPreviousStateRoot()
 *Thrown when the previous state root doesn't match stored one.*
 
+### ErrorInvalidBatchHeaderVersion
+
+```solidity
+error ErrorInvalidBatchHeaderVersion()
+```
+
+*Thrown when the batch header version is invalid.*
+
+
 ### ErrorLastL1MessageSkipped
 
 ```solidity
@@ -98,6 +98,19 @@ describe("ScrollChain.blob", async () => {
     batchHeader0[25] = 1;
   });
 
+  it("should revert when ErrorInvalidBatchHeaderVersion", async () => {
+    const header = new Uint8Array(121);
+    header[0] = 2;
+    await expect(chain.commitBatch(1, header, ["0x"], "0x")).to.revertedWithCustomError(
+      chain,
+      "ErrorInvalidBatchHeaderVersion"
+    );
+    await expect(chain.commitBatch(2, batchHeader0, ["0x"], "0x")).to.revertedWithCustomError(
+      chain,
+      "ErrorInvalidBatchHeaderVersion"
+    );
+  });
+
   it("should revert when ErrorNoBlobFound", async () => {
     await expect(chain.commitBatch(1, batchHeader0, ["0x"], "0x")).to.revertedWithCustomError(
       chain,
@@ -74,6 +74,9 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
     /// @dev Thrown when the previous state root doesn't match stored one.
     error ErrorIncorrectPreviousStateRoot();
 
+    /// @dev Thrown when the batch header version is invalid.
+    error ErrorInvalidBatchHeaderVersion();
+
     /// @dev Thrown when the last message is skipped.
     error ErrorLastL1MessageSkipped();
@@ -116,8 +119,7 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
 
     /// @dev BLS Modulus value defined in EIP-4844 and the magic value returned from a successful call to the
     /// point evaluation precompile
-    uint256 private constant BLS_MODULUS =
-        52435875175126190479447740508185965837690552500527637822603658699938581184513;
+    uint256 private constant BLS_MODULUS = 52435875175126190479447740508185965837690552500527637822603658699938581184513;
 
     /// @notice The chain id of the corresponding layer 2 chain.
     uint64 public immutable layer2ChainId;
@@ -308,10 +310,7 @@
                 batchPtr,
                 BatchHeaderV0Codec.BATCH_HEADER_FIXED_LENGTH + _skippedL1MessageBitmap.length
             );
-        } else if (_version >= 1) {
-            // versions 1 and 2 both use ChunkCodecV1 and BatchHeaderV1Codec,
-            // but they use different blob encoding and different verifiers.
-
+        } else if (_version == 1) {
             bytes32 blobVersionedHash;
             (blobVersionedHash, _dataHash, _totalL1MessagesPoppedInBatch) = _commitChunksV1(
                 _totalL1MessagesPoppedOverall,
@@ -323,7 +322,7 @@
                 _totalL1MessagesPoppedOverall := add(_totalL1MessagesPoppedOverall, _totalL1MessagesPoppedInBatch)
             }
             // store entries, the order matters
-            BatchHeaderV1Codec.storeVersion(batchPtr, _version);
+            BatchHeaderV1Codec.storeVersion(batchPtr, 1);
             BatchHeaderV1Codec.storeBatchIndex(batchPtr, _batchIndex);
             BatchHeaderV1Codec.storeL1MessagePopped(batchPtr, _totalL1MessagesPoppedInBatch);
             BatchHeaderV1Codec.storeTotalL1MessagePopped(batchPtr, _totalL1MessagesPoppedOverall);
@@ -336,6 +335,8 @@
                 batchPtr,
                 BatchHeaderV1Codec.BATCH_HEADER_FIXED_LENGTH + _skippedL1MessageBitmap.length
             );
+        } else {
+            revert ErrorInvalidBatchHeaderVersion();
         }
 
         // check the length of bitmap
@@ -710,15 +711,18 @@
             version := shr(248, calldataload(_batchHeader.offset))
         }
 
-        // version should be always 0 or 1 in current code
         uint256 _length;
         if (version == 0) {
             (batchPtr, _length) = BatchHeaderV0Codec.loadAndValidate(_batchHeader);
             _batchHash = BatchHeaderV0Codec.computeBatchHash(batchPtr, _length);
             _batchIndex = BatchHeaderV0Codec.getBatchIndex(batchPtr);
-        } else if (version >= 1) {
+        } else if (version == 1) {
             (batchPtr, _length) = BatchHeaderV1Codec.loadAndValidate(_batchHeader);
             _batchHash = BatchHeaderV1Codec.computeBatchHash(batchPtr, _length);
             _batchIndex = BatchHeaderV1Codec.getBatchIndex(batchPtr);
+        } else {
+            revert ErrorInvalidBatchHeaderVersion();
+        }
         // only check when genesis is imported
         if (committedBatches[_batchIndex] != _batchHash && finalizedStateRoots[0] != bytes32(0)) {
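In both `commitBatch` and the header loader above, the batch header version is read with `shr(248, calldataload(_batchHeader.offset))`, which loads the first 32 bytes of the header and shifts right by 248 bits, leaving only the first byte. A hedged Go sketch of the same check (illustrative only, mirroring the 121-byte header from the test above; `batchHeaderVersion` is not a repository function):

```go
package main

import "fmt"

// batchHeaderVersion returns the header's first byte, which is what the
// assembly shr(248, calldataload(offset)) extracts on the EVM side.
func batchHeaderVersion(header []byte) (uint8, error) {
	if len(header) == 0 {
		return 0, fmt.Errorf("empty batch header")
	}
	return header[0], nil
}

func main() {
	h := make([]byte, 121)
	h[0] = 2 // like the hardhat test: version 2 must be rejected
	v, _ := batchHeaderVersion(h)
	fmt.Println(v == 0 || v == 1) // false -> ErrorInvalidBatchHeaderVersion
}
```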
@@ -15,22 +15,10 @@ interface IL1GasPriceOracle {
     /// @param scalar The current fee scalar updated.
     event ScalarUpdated(uint256 scalar);
 
-    /// @notice Emitted when current commit fee scalar is updated.
-    /// @param scalar The current commit fee scalar updated.
-    event CommitScalarUpdated(uint256 scalar);
-
-    /// @notice Emitted when current blob fee scalar is updated.
-    /// @param scalar The current blob fee scalar updated.
-    event BlobScalarUpdated(uint256 scalar);
-
     /// @notice Emitted when current l1 base fee is updated.
     /// @param l1BaseFee The current l1 base fee updated.
     event L1BaseFeeUpdated(uint256 l1BaseFee);
 
-    /// @notice Emitted when current l1 blob base fee is updated.
-    /// @param l1BlobBaseFee The current l1 blob base fee updated.
-    event L1BlobBaseFeeUpdated(uint256 l1BlobBaseFee);
-
     /*************************
      * Public View Functions *
      *************************/
@@ -38,24 +26,15 @@
     /// @notice Return the current l1 fee overhead.
     function overhead() external view returns (uint256);
 
-    /// @notice Return the current l1 fee scalar before Curie fork.
+    /// @notice Return the current l1 fee scalar.
     function scalar() external view returns (uint256);
 
-    /// @notice Return the current l1 commit fee scalar.
-    function commitScalar() external view returns (uint256);
-
-    /// @notice Return the current l1 blob fee scalar.
-    function blobScalar() external view returns (uint256);
-
     /// @notice Return the latest known l1 base fee.
     function l1BaseFee() external view returns (uint256);
 
-    /// @notice Return the latest known l1 blob base fee.
-    function l1BlobBaseFee() external view returns (uint256);
-
     /// @notice Computes the L1 portion of the fee based on the size of the rlp encoded input
     /// transaction, the current L1 base fee, and the various dynamic parameters.
-    /// @param data Signed fully RLP-encoded transaction to get the L1 fee for.
+    /// @param data Unsigned fully RLP-encoded transaction to get the L1 fee for.
     /// @return L1 fee that should be paid for the tx
     function getL1Fee(bytes memory data) external view returns (uint256);
@@ -63,7 +42,7 @@
     /// represents the per-transaction gas overhead of posting the transaction and state
     /// roots to L1. Adds 74 bytes of padding to account for the fact that the input does
     /// not have a signature.
-    /// @param data Signed fully RLP-encoded transaction to get the L1 gas for.
+    /// @param data Unsigned fully RLP-encoded transaction to get the L1 gas for.
     /// @return Amount of L1 gas used to publish the transaction.
     function getL1GasUsed(bytes memory data) external view returns (uint256);
@@ -74,9 +53,4 @@
     /// @notice Allows whitelisted caller to modify the l1 base fee.
     /// @param _l1BaseFee New l1 base fee.
     function setL1BaseFee(uint256 _l1BaseFee) external;
-
-    /// @notice Allows whitelisted caller to modify the l1 base fee.
-    /// @param _l1BaseFee New l1 base fee.
-    /// @param _l1BlobBaseFee New l1 blob base fee.
-    function setL1BaseFeeAndBlobBaseFee(uint256 _l1BaseFee, uint256 _l1BlobBaseFee) external;
 }
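The interface above describes the pre-Curie `getL1GasUsed` only in prose: a per-byte calldata cost plus a 74-byte padding for the missing signature. A rough Go sketch of that computation follows; the 4/16 gas-per-byte split is Ethereum's standard calldata pricing and the contract's exact loop is not visible in this diff, so treat the constants and the rounding as assumptions rather than the contract's precise formula:

```go
package main

import "fmt"

const sigPaddingBytes = 74 // from the doc comment: padding for the absent signature

// l1GasUsedBeforeCurie approximates the described pre-Curie calculation.
func l1GasUsedBeforeCurie(data []byte, overhead uint64) uint64 {
	var total uint64
	for _, b := range data {
		if b == 0 {
			total += 4 // zero calldata byte (standard Ethereum pricing)
		} else {
			total += 16 // non-zero calldata byte
		}
	}
	// Padding bytes are charged at the non-zero rate in this sketch.
	return total + overhead + sigPaddingBytes*16
}

func main() {
	fmt.Println(l1GasUsedBeforeCurie([]byte{0x00, 0x01, 0x02}, 2100))
}
```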
@@ -17,28 +17,6 @@ contract L1GasPriceOracle is OwnableBase, IL1GasPriceOracle {
     /// @param _newWhitelist The address of new whitelist contract.
     event UpdateWhitelist(address _oldWhitelist, address _newWhitelist);
 
-    /**********
-     * Errors *
-     **********/
-
-    /// @dev Thrown when the blob fee scalar exceed `MAX_BLOB_SCALAR`.
-    error ErrExceedMaxBlobScalar();
-
-    /// @dev Thrown when the commit fee scalar exceed `MAX_COMMIT_SCALAR`.
-    error ErrExceedMaxCommitScalar();
-
-    /// @dev Thrown when the l1 fee overhead exceed `MAX_OVERHEAD`.
-    error ErrExceedMaxOverhead();
-
-    /// @dev Thrown when the l1 fee scalar exceed `MAX_SCALAR`.
-    error ErrExceedMaxScalar();
-
-    /// @dev Thrown when the caller is not whitelisted.
-    error ErrCallerNotWhitelisted();
-
-    /// @dev Thrown when we enable Curie fork after Curie fork.
-    error ErrAlreadyInCurieFork();
-
     /*************
      * Constants *
      *************/
@@ -50,25 +28,9 @@
     /// Computed based on current l1 block gas limit.
     uint256 private constant MAX_OVERHEAD = 30000000 / 16;
 
-    /// @dev The maximum possible l1 fee scale before Curie.
+    /// @dev The maximum possible l1 fee scale.
     /// x1000 should be enough.
-    uint256 private constant MAX_SCALAR = 1000 * PRECISION;
-
-    /// @dev The maximum possible l1 commit fee scalar after Curie.
-    /// We derive the commit scalar by
-    /// ```
-    /// commit_scalar = commit_gas_per_tx * fluctuation_multiplier * 1e9
-    /// ```
-    /// So, the value should not exceed 10^9 * 1e9 normally.
-    uint256 private constant MAX_COMMIT_SCALAR = 10 ** 9 * PRECISION;
-
-    /// @dev The maximum possible l1 blob fee scalar after Curie.
-    /// We derive the blob scalar by
-    /// ```
-    /// blob_scalar = fluctuation_multiplier / compression_ratio / blob_util_ratio * 1e9
-    /// ```
-    /// So, the value should not exceed 10^9 * 1e9 normally.
-    uint256 private constant MAX_BLOB_SCALAR = 10 ** 9 * PRECISION;
+    uint256 private constant MAX_SCALE = 1000 * PRECISION;
 
     /*************
      * Variables *
@@ -86,27 +48,6 @@
     /// @notice The address of whitelist contract.
     IWhitelist public whitelist;
 
-    /// @inheritdoc IL1GasPriceOracle
-    uint256 public override l1BlobBaseFee;
-
-    /// @inheritdoc IL1GasPriceOracle
-    uint256 public override commitScalar;
-
-    /// @inheritdoc IL1GasPriceOracle
-    uint256 public override blobScalar;
-
-    /// @notice Indicates whether the network has gone through the Curie upgrade.
-    bool public isCurie;
-
-    /*************
-     * Modifiers *
-     *************/
-
-    modifier onlyWhitelistedSender() {
-        if (!whitelist.isSenderAllowed(msg.sender)) revert ErrCallerNotWhitelisted();
-        _;
-    }
-
     /***************
      * Constructor *
      ***************/
@@ -121,116 +62,15 @@ contract L1GasPriceOracle is OwnableBase, IL1GasPriceOracle {
 
     /// @inheritdoc IL1GasPriceOracle
     function getL1Fee(bytes memory _data) external view override returns (uint256) {
-        if (isCurie) {
-            return _getL1FeeCurie(_data);
-        } else {
-            return _getL1FeeBeforeCurie(_data);
-        }
+        uint256 _l1GasUsed = getL1GasUsed(_data);
+        uint256 _l1Fee = _l1GasUsed * l1BaseFee;
+        return (_l1Fee * scalar) / PRECISION;
     }
 
     /// @inheritdoc IL1GasPriceOracle
     /// @dev The `_data` is the RLP-encoded transaction with signature. And we also reserve additional
     /// 4 bytes in the non-zero bytes to store the number of bytes in the RLP-encoded transaction.
     function getL1GasUsed(bytes memory _data) public view override returns (uint256) {
-        if (isCurie) {
-            // It is near zero since we put all transactions to blob.
-            return 0;
-        } else {
-            return _getL1GasUsedBeforeCurie(_data);
-        }
-    }
-
-    /*****************************
-     * Public Mutating Functions *
-     *****************************/
-
-    /// @inheritdoc IL1GasPriceOracle
-    function setL1BaseFee(uint256 _l1BaseFee) external override onlyWhitelistedSender {
-        l1BaseFee = _l1BaseFee;
-
-        emit L1BaseFeeUpdated(_l1BaseFee);
-    }
-
-    /// @inheritdoc IL1GasPriceOracle
-    function setL1BaseFeeAndBlobBaseFee(
-        uint256 _l1BaseFee,
-        uint256 _l1BlobBaseFee
-    ) external override onlyWhitelistedSender {
-        l1BaseFee = _l1BaseFee;
-        l1BlobBaseFee = _l1BlobBaseFee;
-
-        emit L1BaseFeeUpdated(_l1BaseFee);
-        emit L1BlobBaseFeeUpdated(_l1BlobBaseFee);
-    }
-
-    /************************
-     * Restricted Functions *
-     ************************/
-
-    /// @notice Allows the owner to modify the overhead.
-    /// @param _overhead New overhead
-    function setOverhead(uint256 _overhead) external onlyOwner {
-        if (_overhead > MAX_OVERHEAD) revert ErrExceedMaxOverhead();
-
-        overhead = _overhead;
-        emit OverheadUpdated(_overhead);
-    }
-
-    /// Allows the owner to modify the scalar.
-    /// @param _scalar New scalar
-    function setScalar(uint256 _scalar) external onlyOwner {
-        if (_scalar > MAX_SCALAR) revert ErrExceedMaxScalar();
-
-        scalar = _scalar;
-        emit ScalarUpdated(_scalar);
-    }
-
-    /// Allows the owner to modify the commit scalar.
-    /// @param _scalar New scalar
-    function setCommitScalar(uint256 _scalar) external onlyOwner {
-        if (_scalar > MAX_COMMIT_SCALAR) revert ErrExceedMaxCommitScalar();
-
-        commitScalar = _scalar;
-        emit CommitScalarUpdated(_scalar);
-    }
-
-    /// Allows the owner to modify the blob scalar.
-    /// @param _scalar New scalar
-    function setBlobScalar(uint256 _scalar) external onlyOwner {
-        if (_scalar > MAX_BLOB_SCALAR) revert ErrExceedMaxBlobScalar();
-
-        blobScalar = _scalar;
-        emit BlobScalarUpdated(_scalar);
-    }
-
-    /// @notice Update whitelist contract.
-    /// @dev This function can only called by contract owner.
-    /// @param _newWhitelist The address of new whitelist contract.
-    function updateWhitelist(address _newWhitelist) external onlyOwner {
-        address _oldWhitelist = address(whitelist);
-
-        whitelist = IWhitelist(_newWhitelist);
-        emit UpdateWhitelist(_oldWhitelist, _newWhitelist);
-    }
-
-    /// @notice Enable the Curie fork (callable by contract owner).
-    ///
-    /// @dev Since this is a predeploy contract, we will directly set the slot while hard fork
-    /// to avoid external owner operations.
-    /// The reason that we keep this function is for easy unit testing.
-    function enableCurie() external onlyOwner {
-        if (isCurie) revert ErrAlreadyInCurieFork();
-        isCurie = true;
-    }
-
-    /**********************
-     * Internal Functions *
-     **********************/
-
-    /// @dev Internal function to computes the amount of L1 gas used for a transaction before Curie fork.
-    /// The `_data` is the RLP-encoded transaction with signature. And we also reserve additional
-    /// 4 bytes in the non-zero bytes to store the number of bytes in the RLP-encoded transaction.
-    /// @param _data Signed fully RLP-encoded transaction to get the L1 gas for.
-    /// @return Amount of L1 gas used to publish the transaction.
-    function _getL1GasUsedBeforeCurie(bytes memory _data) private view returns (uint256) {
         uint256 _total = 0;
         uint256 _length = _data.length;
         unchecked {
@@ -245,22 +85,48 @@ contract L1GasPriceOracle is OwnableBase, IL1GasPriceOracle {
         }
     }
 
-    /// @dev Internal function to compute the L1 portion of the fee based on the size of the rlp encoded input
-    /// transaction, the current L1 base fee, and the various dynamic parameters, before Curie fork.
-    /// @param _data Signed fully RLP-encoded transaction to get the L1 fee for.
-    /// @return L1 fee that should be paid for the tx
-    function _getL1FeeBeforeCurie(bytes memory _data) private view returns (uint256) {
-        uint256 _l1GasUsed = _getL1GasUsedBeforeCurie(_data);
-        uint256 _l1Fee = _l1GasUsed * l1BaseFee;
-        return (_l1Fee * scalar) / PRECISION;
-    }
+    /*****************************
+     * Public Mutating Functions *
+     *****************************/
 
-    /// @dev Internal function to compute the L1 portion of the fee based on the size of the rlp encoded input
-    /// transaction, the current L1 base fee, and the various dynamic parameters, after Curie fork.
-    /// @param _data Signed fully RLP-encoded transaction to get the L1 fee for.
-    /// @return L1 fee that should be paid for the tx
-    function _getL1FeeCurie(bytes memory _data) private view returns (uint256) {
-        // We have bounded the value of `commitScalar` and `blobScalar`, the whole expression won't overflow.
-        return (commitScalar * l1BaseFee + blobScalar * _data.length * l1BlobBaseFee) / PRECISION;
-    }
+    /// @inheritdoc IL1GasPriceOracle
+    function setL1BaseFee(uint256 _l1BaseFee) external override {
+        require(whitelist.isSenderAllowed(msg.sender), "Not whitelisted sender");
+
+        l1BaseFee = _l1BaseFee;
+
+        emit L1BaseFeeUpdated(_l1BaseFee);
+    }
+
+    /************************
+     * Restricted Functions *
+     ************************/
+
+    /// @notice Allows the owner to modify the overhead.
+    /// @param _overhead New overhead
+    function setOverhead(uint256 _overhead) external onlyOwner {
+        require(_overhead <= MAX_OVERHEAD, "exceed maximum overhead");
+
+        overhead = _overhead;
+        emit OverheadUpdated(_overhead);
+    }
+
+    /// Allows the owner to modify the scalar.
+    /// @param _scalar New scalar
+    function setScalar(uint256 _scalar) external onlyOwner {
+        require(_scalar <= MAX_SCALE, "exceed maximum scale");
+
+        scalar = _scalar;
+        emit ScalarUpdated(_scalar);
+    }
+
+    /// @notice Update whitelist contract.
+    /// @dev This function can only called by contract owner.
+    /// @param _newWhitelist The address of new whitelist contract.
+    function updateWhitelist(address _newWhitelist) external onlyOwner {
+        address _oldWhitelist = address(whitelist);
+
+        whitelist = IWhitelist(_newWhitelist);
+        emit UpdateWhitelist(_oldWhitelist, _newWhitelist);
+    }
 }
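The two branches of this diff encode two different L1 fee formulas: the pre-Curie path charges gasUsed * l1BaseFee * scalar / 1e9, while the Curie path removed here charges (commitScalar * l1BaseFee + blobScalar * len(data) * l1BlobBaseFee) / 1e9. A hedged Go sketch with made-up example numbers (they are not mainnet parameters) to make the arithmetic concrete:

```go
package main

import (
	"fmt"
	"math/big"
)

var precision = big.NewInt(1e9) // PRECISION in the contract

// l1FeeBeforeCurie mirrors _getL1FeeBeforeCurie: gasUsed * baseFee * scalar / 1e9.
func l1FeeBeforeCurie(gasUsed, l1BaseFee, scalar *big.Int) *big.Int {
	fee := new(big.Int).Mul(gasUsed, l1BaseFee)
	fee.Mul(fee, scalar)
	return fee.Div(fee, precision)
}

// l1FeeCurie mirrors _getL1FeeCurie:
// (commitScalar*baseFee + blobScalar*len(data)*blobBaseFee) / 1e9.
func l1FeeCurie(dataLen int64, l1BaseFee, l1BlobBaseFee, commitScalar, blobScalar *big.Int) *big.Int {
	commit := new(big.Int).Mul(commitScalar, l1BaseFee)
	blob := new(big.Int).Mul(blobScalar, big.NewInt(dataLen))
	blob.Mul(blob, l1BlobBaseFee)
	total := commit.Add(commit, blob)
	return total.Div(total, precision)
}

func main() {
	// Example values only: 2500 gas, 30 gwei base fee, scalar 1e9 (i.e. 1.0).
	fmt.Println(l1FeeBeforeCurie(big.NewInt(2500), big.NewInt(30e9), big.NewInt(1e9)))
	// Example values only: 300-byte tx, 30 gwei base fee, 1 gwei blob base fee.
	fmt.Println(l1FeeCurie(300, big.NewInt(30e9), big.NewInt(1e9), big.NewInt(230), big.NewInt(417)))
}
```

Note how the Curie formula no longer depends on calldata gas at all, which is consistent with the removed `getL1GasUsed` branch returning 0 once transactions move into blobs.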
@@ -4,15 +4,14 @@ pragma solidity =0.8.24;
 
 import {DSTestPlus} from "solmate/test/utils/DSTestPlus.sol";
 
+import {L1BlockContainer} from "../L2/predeploys/L1BlockContainer.sol";
 import {L1GasPriceOracle} from "../L2/predeploys/L1GasPriceOracle.sol";
 import {Whitelist} from "../L2/predeploys/Whitelist.sol";
 
 contract L1GasPriceOracleTest is DSTestPlus {
     uint256 private constant PRECISION = 1e9;
     uint256 private constant MAX_OVERHEAD = 30000000 / 16;
-    uint256 private constant MAX_SCALAR = 1000 * PRECISION;
-    uint256 private constant MAX_COMMIT_SCALAR = 10 ** 9 * PRECISION;
-    uint256 private constant MAX_BLOB_SCALAR = 10 ** 9 * PRECISION;
+    uint256 private constant MAX_SCALE = 1000 * PRECISION;
 
     L1GasPriceOracle private oracle;
     Whitelist private whitelist;
@@ -37,7 +36,7 @@ contract L1GasPriceOracleTest is DSTestPlus {
         hevm.stopPrank();
 
         // overhead is too large
-        hevm.expectRevert(L1GasPriceOracle.ErrExceedMaxOverhead.selector);
+        hevm.expectRevert("exceed maximum overhead");
         oracle.setOverhead(MAX_OVERHEAD + 1);
 
         // call by owner, should succeed
@@ -47,7 +46,7 @@ contract L1GasPriceOracleTest is DSTestPlus {
     }
 
     function testSetScalar(uint256 _scalar) external {
-        _scalar = bound(_scalar, 0, MAX_SCALAR);
+        _scalar = bound(_scalar, 0, MAX_SCALE);
 
         // call by non-owner, should revert
         hevm.startPrank(address(1));
@@ -56,8 +55,8 @@ contract L1GasPriceOracleTest is DSTestPlus {
         hevm.stopPrank();
 
         // scale is too large
-        hevm.expectRevert(L1GasPriceOracle.ErrExceedMaxScalar.selector);
-        oracle.setScalar(MAX_SCALAR + 1);
+        hevm.expectRevert("exceed maximum scale");
+        oracle.setScalar(MAX_SCALE + 1);
 
         // call by owner, should succeed
         assertEq(oracle.scalar(), 0);
@@ -65,44 +64,6 @@ contract L1GasPriceOracleTest is DSTestPlus {
         assertEq(oracle.scalar(), _scalar);
     }
 
-    function testSetCommitScalar(uint256 _scalar) external {
-        _scalar = bound(_scalar, 0, MAX_COMMIT_SCALAR);
-
-        // call by non-owner, should revert
-        hevm.startPrank(address(1));
-        hevm.expectRevert("caller is not the owner");
-        oracle.setCommitScalar(_scalar);
-        hevm.stopPrank();
-
-        // scale is too large
-        hevm.expectRevert(L1GasPriceOracle.ErrExceedMaxCommitScalar.selector);
-        oracle.setCommitScalar(MAX_COMMIT_SCALAR + 1);
-
-        // call by owner, should succeed
-        assertEq(oracle.commitScalar(), 0);
-        oracle.setCommitScalar(_scalar);
-        assertEq(oracle.commitScalar(), _scalar);
-    }
-
-    function testSetBlobScalar(uint256 _scalar) external {
-        _scalar = bound(_scalar, 0, MAX_BLOB_SCALAR);
-
-        // call by non-owner, should revert
-        hevm.startPrank(address(1));
-        hevm.expectRevert("caller is not the owner");
-        oracle.setBlobScalar(_scalar);
-        hevm.stopPrank();
-
-        // scale is too large
-        hevm.expectRevert(L1GasPriceOracle.ErrExceedMaxBlobScalar.selector);
-        oracle.setBlobScalar(MAX_COMMIT_SCALAR + 1);
-
-        // call by owner, should succeed
-        assertEq(oracle.blobScalar(), 0);
-        oracle.setBlobScalar(_scalar);
-        assertEq(oracle.blobScalar(), _scalar);
-    }
-
     function testUpdateWhitelist(address _newWhitelist) external {
         hevm.assume(_newWhitelist != address(whitelist));
 
@@ -118,29 +79,12 @@ contract L1GasPriceOracleTest is DSTestPlus {
         assertEq(address(oracle.whitelist()), _newWhitelist);
     }
 
-    function testEnableCurie() external {
-        // call by non-owner, should revert
-        hevm.startPrank(address(1));
-        hevm.expectRevert("caller is not the owner");
-        oracle.enableCurie();
-        hevm.stopPrank();
-
-        // call by owner, should succeed
-        assertBoolEq(oracle.isCurie(), false);
-        oracle.enableCurie();
-        assertBoolEq(oracle.isCurie(), true);
-
-        // enable twice, should revert
-        hevm.expectRevert(L1GasPriceOracle.ErrAlreadyInCurieFork.selector);
-        oracle.enableCurie();
-    }
-
     function testSetL1BaseFee(uint256 _baseFee) external {
         _baseFee = bound(_baseFee, 0, 1e9 * 20000); // max 20k gwei
 
         // call by non-owner, should revert
         hevm.startPrank(address(1));
-        hevm.expectRevert(L1GasPriceOracle.ErrCallerNotWhitelisted.selector);
+        hevm.expectRevert("Not whitelisted sender");
         oracle.setL1BaseFee(_baseFee);
         hevm.stopPrank();
 
@@ -150,25 +94,7 @@ contract L1GasPriceOracleTest is DSTestPlus {
         assertEq(oracle.l1BaseFee(), _baseFee);
     }
 
-    function testSetL1BaseFeeAndBlobBaseFee(uint256 _baseFee, uint256 _blobBaseFee) external {
-        _baseFee = bound(_baseFee, 0, 1e9 * 20000); // max 20k gwei
-        _blobBaseFee = bound(_blobBaseFee, 0, 1e9 * 20000); // max 20k gwei
-
-        // call by non-owner, should revert
-        hevm.startPrank(address(1));
-        hevm.expectRevert(L1GasPriceOracle.ErrCallerNotWhitelisted.selector);
-        oracle.setL1BaseFeeAndBlobBaseFee(_baseFee, _blobBaseFee);
-        hevm.stopPrank();
-
-        // call by owner, should succeed
-        assertEq(oracle.l1BaseFee(), 0);
-        assertEq(oracle.l1BlobBaseFee(), 0);
-        oracle.setL1BaseFeeAndBlobBaseFee(_baseFee, _blobBaseFee);
-        assertEq(oracle.l1BaseFee(), _baseFee);
-        assertEq(oracle.l1BlobBaseFee(), _blobBaseFee);
-    }
-
-    function testGetL1GasUsedBeforeCurie(uint256 _overhead, bytes memory _data) external {
+    function testGetL1GasUsed(uint256 _overhead, bytes memory _data) external {
         _overhead = bound(_overhead, 0, MAX_OVERHEAD);
 
         oracle.setOverhead(_overhead);
@@ -182,14 +108,14 @@ contract L1GasPriceOracleTest is DSTestPlus {
         assertEq(oracle.getL1GasUsed(_data), _gasUsed);
     }
 
-    function testGetL1FeeBeforeCurie(
+    function testGetL1Fee(
         uint256 _baseFee,
         uint256 _overhead,
         uint256 _scalar,
         bytes memory _data
     ) external {
         _overhead = bound(_overhead, 0, MAX_OVERHEAD);
-        _scalar = bound(_scalar, 0, MAX_SCALAR);
+        _scalar = bound(_scalar, 0, MAX_SCALE);
         _baseFee = bound(_baseFee, 0, 1e9 * 20000); // max 20k gwei
 
         oracle.setOverhead(_overhead);
@@ -204,32 +130,4 @@ contract L1GasPriceOracleTest is DSTestPlus {
 
         assertEq(oracle.getL1Fee(_data), (_gasUsed * _baseFee * _scalar) / PRECISION);
     }
-
-    function testGetL1GasUsedCurie(bytes memory _data) external {
-        oracle.enableCurie();
-        assertEq(oracle.getL1GasUsed(_data), 0);
-    }
-
-    function testGetL1FeeCurie(
-        uint256 _baseFee,
-        uint256 _blobBaseFee,
-        uint256 _commitScalar,
-        uint256 _blobScalar,
-        bytes memory _data
-    ) external {
-        _baseFee = bound(_baseFee, 0, 1e9 * 20000); // max 20k gwei
-        _blobBaseFee = bound(_blobBaseFee, 0, 1e9 * 20000); // max 20k gwei
-        _commitScalar = bound(_commitScalar, 0, MAX_COMMIT_SCALAR);
-        _blobScalar = bound(_blobScalar, 0, MAX_BLOB_SCALAR);
-
-        oracle.enableCurie();
-        oracle.setCommitScalar(_commitScalar);
-        oracle.setBlobScalar(_blobScalar);
-        oracle.setL1BaseFeeAndBlobBaseFee(_baseFee, _blobBaseFee);
-
-        assertEq(
-            oracle.getL1Fee(_data),
-            (_commitScalar * _baseFee + _blobScalar * _blobBaseFee * _data.length) / PRECISION
-        );
-    }
 }
@@ -89,6 +89,12 @@ contract ScrollChainTest is DSTestPlus {
         rollup.commitBatch(0, batchHeader0, new bytes[](0), new bytes(0));
         hevm.stopPrank();
 
+        // invalid version, revert
+        hevm.startPrank(address(0));
+        hevm.expectRevert(ScrollChain.ErrorInvalidBatchHeaderVersion.selector);
+        rollup.commitBatch(2, batchHeader0, new bytes[](1), new bytes(0));
+        hevm.stopPrank();
+
         // batch header length too small, revert
         hevm.startPrank(address(0));
         hevm.expectRevert(BatchHeaderV0Codec.ErrorBatchHeaderLengthTooSmall.selector);
@@ -42,7 +42,6 @@ func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *go
 	bp := &BatchProverTask{
 		BaseProverTask: BaseProverTask{
 			vkMap:        vkMap,
-			reverseVkMap: reverseMap(vkMap),
 			db:           db,
 			cfg:          cfg,
 			nameForkMap:  nameForkMap,
@@ -65,31 +64,48 @@ func NewBatchProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *go
 	return bp
 }
 
-type chunkIndexRange struct {
-	start uint64
-	end   uint64
-}
-
-func (r *chunkIndexRange) merge(o chunkIndexRange) *chunkIndexRange {
-	var start, end = r.start, r.end
-	if o.start < r.start {
-		start = o.start
-	}
-	if o.end > r.end {
-		end = o.end
-	}
-	return &chunkIndexRange{start, end}
-}
-
-func (r *chunkIndexRange) contains(start, end uint64) bool {
-	return r.start <= start && r.end > end
-}
+// Assign load and assign batch tasks
+func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
+	taskCtx, err := bp.checkParameter(ctx, getTaskParameter)
+	if err != nil || taskCtx == nil {
+		return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
+	}
+
+	hardForkNumber, err := bp.getHardForkNumberByName(taskCtx.HardForkName)
+	if err != nil {
+		log.Error("batch assign failure because of the hard fork name don't exist", "fork name", taskCtx.HardForkName)
+		return nil, err
+	}
+
+	// if the hard fork number set, rollup relayer must generate the chunk from hard fork number,
+	// so the hard fork chunk's start_block_number must be ForkBlockNumber
+	var startChunkIndex uint64 = 0
+	var endChunkIndex uint64 = math.MaxInt64
+	fromBlockNum, toBlockNum := forks.BlockRange(hardForkNumber, bp.forkHeights)
+	if fromBlockNum != 0 {
+		startChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx.Copy(), fromBlockNum)
+		if chunkErr != nil {
+			log.Error("failed to get fork start chunk index", "forkName", taskCtx.HardForkName, "fromBlockNumber", fromBlockNum, "err", chunkErr)
+			return nil, ErrCoordinatorInternalFailure
+		}
+		if startChunk == nil {
+			return nil, nil
+		}
+		startChunkIndex = startChunk.Index
+	}
+	if toBlockNum != math.MaxInt64 {
+		toChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx.Copy(), toBlockNum)
+		if chunkErr != nil {
+			log.Error("failed to get fork end chunk index", "forkName", taskCtx.HardForkName, "toBlockNumber", toBlockNum, "err", chunkErr)
+			return nil, ErrCoordinatorInternalFailure
+		}
+		if toChunk != nil {
+			// toChunk being nil only indicates that we haven't yet reached the fork boundary
+			// don't need change the endChunkIndex of math.MaxInt64
+			endChunkIndex = toChunk.Index
+		}
+	}
 
-type getHardForkNameByBatchFunc func(*orm.Batch) (string, error)
-
-func (bp *BatchProverTask) doAssignTaskWithinChunkRange(ctx *gin.Context, taskCtx *proverTaskContext,
-	chunkRange *chunkIndexRange, getTaskParameter *coordinatorType.GetTaskParameter, getHardForkName getHardForkNameByBatchFunc) (*coordinatorType.GetTaskSchema, error) {
-	startChunkIndex, endChunkIndex := chunkRange.start, chunkRange.end
 	maxActiveAttempts := bp.cfg.ProverManager.ProversPerSession
 	maxTotalAttempts := bp.cfg.ProverManager.SessionAttempts
 	var batchTask *orm.Batch
@@ -137,26 +153,14 @@ func (bp *BatchProverTask) doAssignTaskWithinChunkRange(ctx *gin.Context, taskCt
 		return nil, nil
 	}
 
-	log.Info("start batch proof generation session", "task_id", batchTask.Hash, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName)
-	var (
-		proverVersion = taskCtx.ProverVersion
-		hardForkName  = taskCtx.HardForkName
-	)
-	var err error
-	if getHardForkName != nil {
-		hardForkName, err = getHardForkName(batchTask)
-		if err != nil {
-			log.Error("failed to get hard fork name by batch", "task_id", batchTask.Hash, "error", err.Error())
-			return nil, ErrCoordinatorInternalFailure
-		}
-	}
+	log.Info("start batch proof generation session", "id", batchTask.Hash, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName)
 
 	proverTask := orm.ProverTask{
 		TaskID:          batchTask.Hash,
 		ProverPublicKey: taskCtx.PublicKey,
 		TaskType:        int16(message.ProofTypeBatch),
 		ProverName:      taskCtx.ProverName,
-		ProverVersion:   proverVersion,
+		ProverVersion:   taskCtx.ProverVersion,
 		ProvingStatus:   int16(types.ProverAssigned),
 		FailureType:     int16(types.ProverTaskFailureTypeUndefined),
 		// here why need use UTC time. see scroll/common/databased/db.go
@@ -166,18 +170,18 @@ func (bp *BatchProverTask) doAssignTaskWithinChunkRange(ctx *gin.Context, taskCt
 	// Store session info.
 	if err = bp.proverTaskOrm.InsertProverTask(ctx.Copy(), &proverTask); err != nil {
 		bp.recoverActiveAttempts(ctx, batchTask)
-		log.Error("insert batch prover task info fail", "task_id", batchTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
+		log.Error("insert batch prover task info fail", "taskID", batchTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
 		return nil, ErrCoordinatorInternalFailure
 	}
 
 	taskMsg, err := bp.formatProverTask(ctx.Copy(), &proverTask)
 	if err != nil {
 		bp.recoverActiveAttempts(ctx, batchTask)
-		log.Error("format prover task failure", "task_id", batchTask.Hash, "err", err)
+		log.Error("format prover task failure", "hash", batchTask.Hash, "err", err)
 		return nil, ErrCoordinatorInternalFailure
 	}
 
-	bp.batchTaskGetTaskTotal.WithLabelValues(hardForkName).Inc()
+	bp.batchTaskGetTaskTotal.WithLabelValues(taskCtx.HardForkName).Inc()
 	bp.batchTaskGetTaskProver.With(prometheus.Labels{
 		coordinatorType.LabelProverName:      proverTask.ProverName,
 		coordinatorType.LabelProverPublicKey: proverTask.ProverPublicKey,
@@ -187,107 +191,6 @@ func (bp *BatchProverTask) doAssignTaskWithinChunkRange(ctx *gin.Context, taskCt
 	return taskMsg, nil
 }
 
-func (bp *BatchProverTask) getChunkRangeByName(ctx *gin.Context, hardForkName string) (*chunkIndexRange, error) {
-	hardForkNumber, err := bp.getHardForkNumberByName(hardForkName)
-	if err != nil {
-		log.Error("batch assign failure because of the hard fork name don't exist", "fork name", hardForkName)
-		return nil, err
-	}
-
-	// if the hard fork number set, rollup relayer must generate the chunk from hard fork number,
-	// so the hard fork chunk's start_block_number must be ForkBlockNumber
-	var startChunkIndex uint64 = 0
-	var endChunkIndex uint64 = math.MaxInt64
-	fromBlockNum, toBlockNum := forks.BlockRange(hardForkNumber, bp.forkHeights)
-	if fromBlockNum != 0 {
-		startChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx.Copy(), fromBlockNum)
-		if chunkErr != nil {
-			log.Error("failed to get fork start chunk index", "forkName", hardForkName, "fromBlockNumber", fromBlockNum, "err", chunkErr)
-			return nil, ErrCoordinatorInternalFailure
-		}
-		if startChunk == nil {
-			return nil, nil
-		}
-		startChunkIndex = startChunk.Index
-	}
-	if toBlockNum != math.MaxInt64 {
-		toChunk, chunkErr := bp.chunkOrm.GetChunkByStartBlockNumber(ctx.Copy(), toBlockNum)
-		if chunkErr != nil {
-			log.Error("failed to get fork end chunk index", "forkName", hardForkName, "toBlockNumber", toBlockNum, "err", chunkErr)
-			return nil, ErrCoordinatorInternalFailure
-		}
-		if toChunk != nil {
-			// toChunk being nil only indicates that we haven't yet reached the fork boundary
-			// don't need change the endChunkIndex of math.MaxInt64
-			endChunkIndex = toChunk.Index
-		}
-	}
-	return &chunkIndexRange{startChunkIndex, endChunkIndex}, nil
-}
-
-func (bp *BatchProverTask) assignWithSingleCircuit(ctx *gin.Context, taskCtx *proverTaskContext, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
-	chunkRange, err := bp.getChunkRangeByName(ctx, taskCtx.HardForkName)
-	if err != nil {
-		return nil, err
-	}
-	if chunkRange == nil {
-		return nil, nil
-	}
-	return bp.doAssignTaskWithinChunkRange(ctx, taskCtx, chunkRange, getTaskParameter, nil)
-}
-
-func (bp *BatchProverTask) assignWithTwoCircuits(ctx *gin.Context, taskCtx *proverTaskContext, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
-	var (
-		hardForkNames [2]string
-		chunkRanges   [2]*chunkIndexRange
-		err           error
-	)
-	for i := 0; i < 2; i++ {
-		hardForkNames[i] = bp.reverseVkMap[getTaskParameter.VKs[i]]
-		chunkRanges[i], err = bp.getChunkRangeByName(ctx, hardForkNames[i])
-		if err != nil {
-			return nil, err
-		}
-		if chunkRanges[i] == nil {
-			return nil, nil
-		}
-	}
-	chunkRange := chunkRanges[0].merge(*chunkRanges[1])
-	var hardForkName string
-	getHardForkName := func(batch *orm.Batch) (string, error) {
-		for i := 0; i < 2; i++ {
-			if chunkRanges[i].contains(batch.StartChunkIndex, batch.EndChunkIndex) {
-				hardForkName = hardForkNames[i]
-				break
-			}
-		}
-		if hardForkName == "" {
-			log.Warn("get batch not belongs to any hard fork name", "batch id", batch.Index)
-			return "", fmt.Errorf("get batch not belongs to any hard fork name, batch id: %d", batch.Index)
-		}
-		return hardForkName, nil
-	}
-	schema, err := bp.doAssignTaskWithinChunkRange(ctx, taskCtx, chunkRange, getTaskParameter, getHardForkName)
-	if schema != nil && err == nil {
-		schema.HardForkName = hardForkName
-		return schema, nil
-	}
-	return schema, err
-}
-
-// Assign load and assign batch tasks
-func (bp *BatchProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
-	taskCtx, err := bp.checkParameter(ctx, getTaskParameter)
-	if err != nil || taskCtx == nil {
-		return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
-	}
-
-	if len(getTaskParameter.VKs) > 0 {
-		return bp.assignWithTwoCircuits(ctx, taskCtx, getTaskParameter)
-	}
-	return bp.assignWithSingleCircuit(ctx, taskCtx, getTaskParameter)
-}
-
 func (bp *BatchProverTask) formatProverTask(ctx context.Context, task *orm.ProverTask) (*coordinatorType.GetTaskSchema, error) {
 	// get chunk from db
 	chunks, err := bp.chunkOrm.GetChunksByBatchHash(ctx, task.TaskID)
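For reference, the semantics of the `chunkIndexRange` helper removed above: ranges are half-open `[start, end)`, `merge` takes the envelope of the two ranges, and `contains` requires the queried end to stay strictly below the range's end. A standalone sketch reproducing the deleted logic outside the repository:

```go
package main

import "fmt"

type chunkIndexRange struct {
	start uint64 // inclusive
	end   uint64 // exclusive
}

// merge returns the envelope of two ranges, as in the deleted code.
func (r chunkIndexRange) merge(o chunkIndexRange) chunkIndexRange {
	start, end := r.start, r.end
	if o.start < r.start {
		start = o.start
	}
	if o.end > r.end {
		end = o.end
	}
	return chunkIndexRange{start, end}
}

// contains mirrors the deleted check: start inside, end strictly below r.end.
func (r chunkIndexRange) contains(start, end uint64) bool {
	return r.start <= start && r.end > end
}

func main() {
	a := chunkIndexRange{10, 20}
	b := chunkIndexRange{20, 30}
	fmt.Println(a.merge(b))         // {10 30}
	fmt.Println(a.contains(10, 19)) // true
	fmt.Println(a.contains(10, 20)) // false: end is exclusive
}
```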
@@ -39,7 +39,6 @@ func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *go
|
||||
cp := &ChunkProverTask{
|
||||
BaseProverTask: BaseProverTask{
|
||||
vkMap: vkMap,
|
||||
reverseVkMap: reverseMap(vkMap),
|
||||
db: db,
|
||||
cfg: cfg,
|
||||
nameForkMap: nameForkMap,
|
||||
@@ -62,11 +61,20 @@ func NewChunkProverTask(cfg *config.Config, chainCfg *params.ChainConfig, db *go
|
||||
return cp
|
||||
}
|
||||
|
||||
type getHardForkNameByChunkFunc func(*orm.Chunk) (string, error)
|
||||
// Assign the chunk proof which need to prove
|
||||
func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
|
||||
taskCtx, err := cp.checkParameter(ctx, getTaskParameter)
|
||||
if err != nil || taskCtx == nil {
|
||||
return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
|
||||
}
|
||||
|
||||
func (cp *ChunkProverTask) doAssignTaskWithinBlockRange(ctx *gin.Context, taskCtx *proverTaskContext,
|
||||
blockRange *blockRange, getTaskParameter *coordinatorType.GetTaskParameter, getHardForkName getHardForkNameByChunkFunc) (*coordinatorType.GetTaskSchema, error) {
|
||||
fromBlockNum, toBlockNum := blockRange.from, blockRange.to
|
||||
hardForkNumber, err := cp.getHardForkNumberByName(taskCtx.HardForkName)
|
||||
if err != nil {
|
||||
log.Error("chunk assign failure because of the hard fork name don't exist", "fork name", taskCtx.HardForkName)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fromBlockNum, toBlockNum := forks.BlockRange(hardForkNumber, cp.forkHeights)
|
||||
if toBlockNum > getTaskParameter.ProverHeight {
|
||||
toBlockNum = getTaskParameter.ProverHeight + 1
|
||||
}
|
||||
@@ -118,26 +126,14 @@ func (cp *ChunkProverTask) doAssignTaskWithinBlockRange(ctx *gin.Context, taskCt
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
log.Info("start chunk generation session", "task_id", chunkTask.Hash, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName)
|
||||
var (
|
||||
proverVersion = taskCtx.ProverVersion
|
||||
hardForkName = taskCtx.HardForkName
|
||||
err error
|
||||
)
|
||||
if getHardForkName != nil {
|
||||
hardForkName, err = getHardForkName(chunkTask)
|
||||
if err != nil {
|
||||
log.Error("failed to get hard fork name by chunk", "task_id", chunkTask.Hash, "error", err.Error())
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
}
|
||||
log.Info("start chunk generation session", "id", chunkTask.Hash, "public key", taskCtx.PublicKey, "prover name", taskCtx.ProverName)
|
||||
|
||||
proverTask := orm.ProverTask{
|
||||
TaskID: chunkTask.Hash,
|
||||
ProverPublicKey: taskCtx.PublicKey,
|
||||
TaskType: int16(message.ProofTypeChunk),
|
||||
ProverName: taskCtx.ProverName,
|
||||
ProverVersion: proverVersion,
|
||||
ProverVersion: taskCtx.ProverVersion,
|
||||
ProvingStatus: int16(types.ProverAssigned),
|
||||
FailureType: int16(types.ProverTaskFailureTypeUndefined),
|
||||
// here why need use UTC time. see scroll/common/databased/db.go
|
||||
@@ -146,18 +142,18 @@ func (cp *ChunkProverTask) doAssignTaskWithinBlockRange(ctx *gin.Context, taskCt
|
||||
|
||||
if err = cp.proverTaskOrm.InsertProverTask(ctx.Copy(), &proverTask); err != nil {
|
||||
cp.recoverActiveAttempts(ctx, chunkTask)
|
||||
log.Error("insert chunk prover task fail", "task_id", chunkTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
|
||||
log.Error("insert chunk prover task fail", "taskID", chunkTask.Hash, "publicKey", taskCtx.PublicKey, "err", err)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
|
||||
taskMsg, err := cp.formatProverTask(ctx.Copy(), &proverTask)
|
||||
if err != nil {
|
||||
cp.recoverActiveAttempts(ctx, chunkTask)
|
||||
log.Error("format prover task failure", "task_id", chunkTask.Hash, "err", err)
|
||||
log.Error("format prover task failure", "hash", chunkTask.Hash, "err", err)
|
||||
return nil, ErrCoordinatorInternalFailure
|
||||
}
|
||||
|
||||
cp.chunkTaskGetTaskTotal.WithLabelValues(hardForkName).Inc()
|
||||
cp.chunkTaskGetTaskTotal.WithLabelValues(taskCtx.HardForkName).Inc()
|
||||
cp.chunkTaskGetTaskProver.With(prometheus.Labels{
|
||||
coordinatorType.LabelProverName: proverTask.ProverName,
|
||||
coordinatorType.LabelProverPublicKey: proverTask.ProverPublicKey,
|
||||
@@ -167,95 +163,6 @@ func (cp *ChunkProverTask) doAssignTaskWithinBlockRange(ctx *gin.Context, taskCt
|
||||
return taskMsg, nil
|
||||
}
|
||||
|
||||
func (cp *ChunkProverTask) assignWithSingleCircuit(ctx *gin.Context, taskCtx *proverTaskContext, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
|
||||
blockRange, err := cp.getBlockRangeByName(taskCtx.HardForkName)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return cp.doAssignTaskWithinBlockRange(ctx, taskCtx, blockRange, getTaskParameter, nil)
|
||||
}
|
||||
|
||||
func (cp *ChunkProverTask) assignWithTwoCircuits(ctx *gin.Context, taskCtx *proverTaskContext, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
|
||||
var (
|
||||
hardForkNames [2]string
|
||||
blockRanges [2]*blockRange
|
||||
err error
|
||||
)
|
||||
for i := 0; i < 2; i++ {
|
||||
hardForkNames[i] = cp.reverseVkMap[getTaskParameter.VKs[i]]
|
||||
blockRanges[i], err = cp.getBlockRangeByName(hardForkNames[i])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
blockRange, err := blockRanges[0].merge(*blockRanges[1])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
var hardForkName string
|
||||
getHardForkName := func(chunk *orm.Chunk) (string, error) {
|
||||
for i := 0; i < 2; i++ {
|
||||
if blockRanges[i].contains(chunk.StartBlockNumber, chunk.EndBlockNumber) {
|
||||
hardForkName = hardForkNames[i]
|
||||
break
|
||||
}
|
||||
}
|
||||
if hardForkName == "" {
|
||||
log.Warn("get chunk not belongs to any hard fork name", "chunk id", chunk.Index)
|
||||
return "", fmt.Errorf("get chunk not belongs to any hard fork name, chunk id: %d", chunk.Index)
|
||||
}
|
||||
return hardForkName, nil
|
||||
}
|
||||
schema, err := cp.doAssignTaskWithinBlockRange(ctx, taskCtx, blockRange, getTaskParameter, getHardForkName)
|
||||
if schema != nil && err == nil {
|
||||
schema.HardForkName = hardForkName
|
||||
return schema, nil
|
||||
}
|
||||
return schema, err
|
||||
}
|
||||
|
||||
type blockRange struct {
|
||||
from uint64
|
||||
to uint64
|
||||
}
|
||||
|
||||
func (r *blockRange) merge(o blockRange) (*blockRange, error) {
|
||||
if r.from == o.to {
|
||||
return &blockRange{o.from, r.to}, nil
|
||||
} else if r.to == o.from {
|
||||
return &blockRange{r.from, o.to}, nil
|
||||
}
|
||||
return nil, fmt.Errorf("two ranges are not adjacent")
|
||||
}
|
||||
|
||||
func (r *blockRange) contains(start, end uint64) bool {
|
||||
return r.from <= start && r.to > end
|
||||
}
|
||||
|
||||
func (cp *ChunkProverTask) getBlockRangeByName(hardForkName string) (*blockRange, error) {
|
||||
hardForkNumber, err := cp.getHardForkNumberByName(hardForkName)
|
||||
if err != nil {
|
||||
log.Error("chunk assign failure because of the hard fork name don't exist", "fork name", hardForkName)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
fromBlockNum, toBlockNum := forks.BlockRange(hardForkNumber, cp.forkHeights)
|
||||
return &blockRange{fromBlockNum, toBlockNum}, nil
|
||||
}
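
The blockRange helpers above are half-open: `from` is inclusive and `to` is exclusive, so `merge` only accepts ranges that touch exactly at one boundary. A minimal, self-contained sketch of the semantics; the fork heights here are made up for illustration:

```go
package main

import "fmt"

type blockRange struct {
    from uint64
    to   uint64
}

func (r *blockRange) merge(o blockRange) (*blockRange, error) {
    if r.from == o.to {
        return &blockRange{o.from, r.to}, nil
    } else if r.to == o.from {
        return &blockRange{r.from, o.to}, nil
    }
    return nil, fmt.Errorf("two ranges are not adjacent")
}

func (r *blockRange) contains(start, end uint64) bool {
    return r.from <= start && r.to > end
}

func main() {
    // Hypothetical fork heights: the older circuit covers blocks [0, 100),
    // the newer circuit covers [100, 200).
    older := blockRange{from: 0, to: 100}
    newer := blockRange{from: 100, to: 200}

    merged, err := older.merge(newer) // adjacent at block 100, so this succeeds
    if err != nil {
        panic(err)
    }
    fmt.Println(merged.contains(50, 99))   // true: the chunk lies fully inside [0, 200)
    fmt.Println(merged.contains(150, 200)) // false: end == to, and to is exclusive
}
```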

// Assign the chunk proof task that needs to be proven
func (cp *ChunkProverTask) Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error) {
    taskCtx, err := cp.checkParameter(ctx, getTaskParameter)
    if err != nil || taskCtx == nil {
        return nil, fmt.Errorf("check prover task parameter failed, error:%w", err)
    }

    if len(getTaskParameter.VKs) > 0 {
        return cp.assignWithTwoCircuits(ctx, taskCtx, getTaskParameter)
    }
    return cp.assignWithSingleCircuit(ctx, taskCtx, getTaskParameter)
}

func (cp *ChunkProverTask) formatProverTask(ctx context.Context, task *orm.ProverTask) (*coordinatorType.GetTaskSchema, error) {
    // Get block hashes.
    blockHashes, dbErr := cp.blockOrm.GetL2BlockHashesByChunkHash(ctx, task.TaskID)

@@ -29,27 +29,14 @@ type ProverTask interface {
    Assign(ctx *gin.Context, getTaskParameter *coordinatorType.GetTaskParameter) (*coordinatorType.GetTaskSchema, error)
}

func reverseMap(input map[string]string) map[string]string {
    output := make(map[string]string, len(input))
    for k, v := range input {
        if k != "" {
            output[v] = k
        }
    }
    return output
}
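
To make the direction of the mapping concrete, a small sketch of reverseMap in use; the base64 vk strings are invented, real values come from the verifier's VK maps:

```go
package main

import "fmt"

// reverseMap inverts a hardForkName->vk map into a vk->hardForkName map,
// skipping the empty hard fork name (copied from the diff above).
func reverseMap(input map[string]string) map[string]string {
    output := make(map[string]string, len(input))
    for k, v := range input {
        if k != "" {
            output[v] = k
        }
    }
    return output
}

func main() {
    // Hypothetical entries; the "" key is a fallback and is deliberately skipped.
    vkMap := map[string]string{
        "bernoulli": "dmtfYmVybm91bGxp",
        "curie":     "dmtfY3VyaWU=",
        "":          "dmtfYmVybm91bGxp",
    }
    reverseVkMap := reverseMap(vkMap)
    fmt.Println(reverseVkMap["dmtfY3VyaWU="]) // "curie"
}
```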

// BaseProverTask a base prover task which contains a series of common functions
type BaseProverTask struct {
    cfg *config.Config
    db  *gorm.DB

-   // key is hardForkName, value is vk
-   vkMap map[string]string
-   // key is vk, value is hardForkName
-   reverseVkMap map[string]string
-   nameForkMap  map[string]uint64
-   forkHeights  []uint64
+   vkMap       map[string]string
+   nameForkMap map[string]uint64
+   forkHeights []uint64

    batchOrm *orm.Batch
    chunkOrm *orm.Chunk
@@ -87,42 +74,30 @@ func (b *BaseProverTask) checkParameter(ctx *gin.Context, getTaskParameter *coor
    }
    ptc.ProverVersion = proverVersion.(string)

+   hardForkName, hardForkNameExist := ctx.Get(coordinatorType.HardForkName)
+   if !hardForkNameExist {
+       return nil, fmt.Errorf("get hard fork name from context failed")
+   }
+   ptc.HardForkName = hardForkName.(string)

    if !version.CheckScrollRepoVersion(proverVersion.(string), b.cfg.ProverManager.MinProverVersion) {
        return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s", b.cfg.ProverManager.MinProverVersion, proverVersion.(string))
    }

-   // signals that the prover is a multi-circuits version
-   if len(getTaskParameter.VKs) > 0 {
-       if len(getTaskParameter.VKs) != 2 {
-           return nil, fmt.Errorf("parameter vks length must be 2")
-       }
-       for _, vk := range getTaskParameter.VKs {
-           if _, exists := b.reverseVkMap[vk]; !exists {
-               return nil, fmt.Errorf("incompatible vk. vk %s is invalid", vk)
-           }
-       }
-   } else {
-       hardForkName, hardForkNameExist := ctx.Get(coordinatorType.HardForkName)
-       if !hardForkNameExist {
-           return nil, fmt.Errorf("get hard fork name from context failed")
-       }
-       ptc.HardForkName = hardForkName.(string)
-       vk, vkExist := b.vkMap[ptc.HardForkName]
-       if !vkExist {
-           return nil, fmt.Errorf("can't get vk for hard fork:%s, vkMap:%v", ptc.HardForkName, b.vkMap)
-       }

+   vk, vkExist := b.vkMap[ptc.HardForkName]
+   if !vkExist {
+       return nil, fmt.Errorf("can't get vk for hard fork:%s, vkMap:%v", ptc.HardForkName, b.vkMap)
+   }

-       // if the prover has a different vk
-       if getTaskParameter.VK != vk {
-           log.Error("vk inconsistency", "prover vk", getTaskParameter.VK, "vk", vk, "hardForkName", ptc.HardForkName)
-           // if the prover reports a different prover version
-           if !version.CheckScrollProverVersion(proverVersion.(string)) {
-               return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", version.Version, proverVersion.(string))
-           }
-           // if the prover reports the same prover version
-           return nil, fmt.Errorf("incompatible vk. please check your params files or config files")
+   // if the prover has a different vk
+   if getTaskParameter.VK != vk {
+       log.Error("vk inconsistency", "prover vk", getTaskParameter.VK, "vk", vk, "hardForkName", ptc.HardForkName)
+       // if the prover reports a different prover version
+       if !version.CheckScrollProverVersion(proverVersion.(string)) {
+           return nil, fmt.Errorf("incompatible prover version. please upgrade your prover, expect version: %s, actual version: %s", version.Version, proverVersion.(string))
+       }
+       // if the prover reports the same prover version
+       return nil, fmt.Errorf("incompatible vk. please check your params files or config files")
+   }

    isBlocked, err := b.proverBlockListOrm.IsPublicKeyBlocked(ctx.Copy(), publicKey.(string))

@@ -134,12 +134,7 @@ func (m *ProofReceiverLogic) HandleZkProof(ctx *gin.Context, proofMsg *message.P
    if len(pv) == 0 {
        return fmt.Errorf("get ProverVersion from context failed")
    }
-   // use hard_fork_name from the parameter first
-   // if the prover supports multiple hard forks, the real hard_fork_name is not set in the gin context
-   hardForkName := proofParameter.HardForkName
-   if hardForkName == "" {
-       hardForkName = ctx.GetString(coordinatorType.HardForkName)
-   }
+   hardForkName := ctx.GetString(coordinatorType.HardForkName)

    var proverTask *orm.ProverTask
    var err error

Binary file not shown.
Binary file not shown.
@@ -3,8 +3,8 @@
package verifier

/*
-#cgo LDFLAGS: -lzkp -lm -ldl -lzktrie -L${SRCDIR}/lib/ -Wl,-rpath=${SRCDIR}/lib
-#cgo gpu LDFLAGS: -lzkp -lm -ldl -lgmp -lstdc++ -lprocps -lzktrie -L/usr/local/cuda/lib64/ -lcudart -L${SRCDIR}/lib/ -Wl,-rpath=${SRCDIR}/lib
+#cgo LDFLAGS: -lzkp -lm -ldl -lzktrie -L${SRCDIR}/lib/ -Wl,-rpath,${SRCDIR}/lib
+#cgo gpu LDFLAGS: -lzkp -lm -ldl -lgmp -lstdc++ -lprocps -lzktrie -L/usr/local/cuda/lib64/ -lcudart -L${SRCDIR}/lib/ -Wl,-rpath,${SRCDIR}/lib
#include <stdlib.h>
#include "./lib/libzkp.h"
*/
@@ -164,7 +164,9 @@ func (v *Verifier) loadEmbedVK() error {
        return err
    }

-   v.BatchVKMap["bernoulli"] = base64.StdEncoding.EncodeToString(batchVKBytes)
-   v.ChunkVKMap["bernoulli"] = base64.StdEncoding.EncodeToString(chunkVkBytes)
+   v.BatchVKMap["shanghai"] = base64.StdEncoding.EncodeToString(batchVKBytes)
+   v.ChunkVKMap["shanghai"] = base64.StdEncoding.EncodeToString(chunkVkBytes)
+   v.BatchVKMap[""] = base64.StdEncoding.EncodeToString(batchVKBytes)
+   v.ChunkVKMap[""] = base64.StdEncoding.EncodeToString(chunkVkBytes)
    return nil
}

@@ -2,17 +2,15 @@ package types

// GetTaskParameter for ProverTasks request parameter
type GetTaskParameter struct {
-   ProverHeight uint64   `form:"prover_height" json:"prover_height"`
-   TaskType     int      `form:"task_type" json:"task_type"`
-   VK           string   `form:"vk" json:"vk"` // will be deprecated once all go_prover instances are offline
-   VKs          []string `form:"vks" json:"vks"` // for rust_prover, which supports multi-circuits
+   ProverHeight uint64 `form:"prover_height" json:"prover_height"`
+   TaskType     int    `form:"task_type" json:"task_type"`
+   VK           string `form:"vk" json:"vk"`
}
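
For context on what the dropped VKs field carried on the wire, here is a hypothetical two-circuit get-task request, marshaled as the coordinator would have received it; the vk strings and the task type value are made up:

```go
package main

import (
    "encoding/json"
    "fmt"
)

// Mirrors the struct above; only the JSON tags matter for the wire format.
type GetTaskParameter struct {
    ProverHeight uint64   `json:"prover_height"`
    TaskType     int      `json:"task_type"`
    VK           string   `json:"vk"`
    VKs          []string `json:"vks,omitempty"`
}

func main() {
    // Hypothetical request from a two-circuit prover: one vk per supported fork.
    req := GetTaskParameter{
        ProverHeight: 1234567,
        TaskType:     1, // chunk (assumed value)
        VKs:          []string{"dmtfYmVybm91bGxp", "dmtfY3VyaWU="},
    }
    body, _ := json.Marshal(req)
    fmt.Println(string(body))
}
```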

// GetTaskSchema the schema data returned to the prover for a get-task request
type GetTaskSchema struct {
-   UUID         string `json:"uuid"`
-   TaskID       string `json:"task_id"`
-   TaskType     int    `json:"task_type"`
-   TaskData     string `json:"task_data"`
-   HardForkName string `json:"hard_fork_name"`
+   UUID     string `json:"uuid"`
+   TaskID   string `json:"task_id"`
+   TaskType int    `json:"task_type"`
+   TaskData string `json:"task_data"`
}

@@ -3,12 +3,11 @@ package types

// SubmitProofParameter the SubmitProof api request parameter
type SubmitProofParameter struct {
-   // TODO: once provers have upgraded, change this field to required
-   UUID         string `form:"uuid" json:"uuid"`
-   TaskID       string `form:"task_id" json:"task_id" binding:"required"`
-   TaskType     int    `form:"task_type" json:"task_type" binding:"required"`
-   Status       int    `form:"status" json:"status"`
-   Proof        string `form:"proof" json:"proof"`
-   FailureType  int    `form:"failure_type" json:"failure_type"`
-   FailureMsg   string `form:"failure_msg" json:"failure_msg"`
-   HardForkName string `form:"hard_fork_name" json:"hard_fork_name"`
+   UUID        string `form:"uuid" json:"uuid"`
+   TaskID      string `form:"task_id" json:"task_id" binding:"required"`
+   TaskType    int    `form:"task_type" json:"task_type" binding:"required"`
+   Status      int    `form:"status" json:"status"`
+   Proof       string `form:"proof" json:"proof"`
+   FailureType int    `form:"failure_type" json:"failure_type"`
+   FailureMsg  string `form:"failure_msg" json:"failure_msg"`
}

74 database/script/abi.json Normal file
@@ -0,0 +1,74 @@
[
  {
    "anonymous": false,
    "inputs": [
      {
        "indexed": false,
        "internalType": "uint256",
        "name": "basefee",
        "type": "uint256"
      }
    ],
    "name": "BaseFeeSuccess",
    "type": "event"
  },
  {
    "anonymous": false,
    "inputs": [
      {
        "indexed": false,
        "internalType": "bytes32",
        "name": "data",
        "type": "bytes32"
      }
    ],
    "name": "McopySuccess",
    "type": "event"
  },
  {
    "anonymous": false,
    "inputs": [
      {
        "indexed": false,
        "internalType": "uint256",
        "name": "value",
        "type": "uint256"
      }
    ],
    "name": "TloadSuccess",
    "type": "event"
  },
  {
    "anonymous": false,
    "inputs": [],
    "name": "TstoreSuccess",
    "type": "event"
  },
  {
    "inputs": [],
    "name": "useBaseFee",
    "outputs": [],
    "stateMutability": "nonpayable",
    "type": "function"
  },
  {
    "inputs": [],
    "name": "useMcopy",
    "outputs": [],
    "stateMutability": "nonpayable",
    "type": "function"
  },
  {
    "inputs": [
      {
        "internalType": "uint256",
        "name": "newValue",
        "type": "uint256"
      }
    ],
    "name": "useTloadTstore",
    "outputs": [],
    "stateMutability": "nonpayable",
    "type": "function"
  }
]
221 database/script/send_transactions.go Normal file
@@ -0,0 +1,221 @@
package main

import (
    "context"
    "crypto/ecdsa"
    "fmt"
    "math/big"
    "os"
    "strings"
    "time"

    "github.com/scroll-tech/go-ethereum/accounts/abi/bind"
    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/core/types"
    "github.com/scroll-tech/go-ethereum/crypto"
    "github.com/scroll-tech/go-ethereum/ethclient"
    "github.com/scroll-tech/go-ethereum/log"
)

func main() {
    glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
    glogger.Verbosity(log.LvlInfo)
    log.Root().SetHandler(glogger)

    privateKey, err := crypto.HexToECDSA(strings.TrimPrefix(os.Getenv("L2_DEPLOYER_PRIVATE_KEY"), "0x"))
    if err != nil {
        log.Crit("failed to create private key", "err", err)
    }
    publicKey := privateKey.Public()
    publicKeyECDSA, ok := publicKey.(*ecdsa.PublicKey)
    if !ok {
        log.Crit("failed to cast public key to ECDSA")
    }
    fromAddress := crypto.PubkeyToAddress(*publicKeyECDSA)

    client, err := ethclient.Dial(os.Getenv("SCROLL_L2_DEPLOYMENT_RPC"))
    if err != nil {
        log.Crit("failed to connect to network", "err", err)
    }

    auth, err := bind.NewKeyedTransactorWithChainID(privateKey, new(big.Int).SetUint64(222222))
    if err != nil {
        log.Crit("failed to initialize keyed transactor with chain ID", "err", err)
    }

    abiJSON, err := os.ReadFile("abi.json")
    if err != nil {
        log.Crit("failed to read ABI file", "err", err)
    }

    l2TestCurieOpcodesMetaData := &bind.MetaData{ABI: string(abiJSON)}
    l2TestCurieOpcodesAbi, err := l2TestCurieOpcodesMetaData.GetAbi()
    if err != nil {
        log.Crit("failed to get abi", "err", err)
    }

    nonce, err := client.PendingNonceAt(context.Background(), fromAddress)
    if err != nil {
        log.Crit("failed to get pending nonce", "err", err)
    }

    useTloadTstoreCalldata, err := l2TestCurieOpcodesAbi.Pack("useTloadTstore", new(big.Int).SetUint64(9876543210))
    if err != nil {
        log.Crit("failed to pack useTloadTstore calldata", "err", err)
    }

    useMcopyCalldata, err := l2TestCurieOpcodesAbi.Pack("useMcopy")
    if err != nil {
        log.Crit("failed to pack useMcopy calldata", "err", err)
    }

    useBaseFee, err := l2TestCurieOpcodesAbi.Pack("useBaseFee")
    if err != nil {
        log.Crit("failed to pack useBaseFee calldata", "err", err)
    }

    l2TestCurieOpcodesAddr := common.HexToAddress(os.Getenv("L2_TEST_CURIE_OPCODES_ADDR"))

    txTypes := []int{
        LegacyTxType,
        AccessListTxType,
        DynamicFeeTxType,
    }

    accessLists := []types.AccessList{
        nil,
        {
            {Address: common.HexToAddress("0x0000000000000000000000000000000000000000"), StorageKeys: []common.Hash{
                common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000"),
            }},
        },
        {
            {Address: common.HexToAddress("0x1000000000000000000000000000000000000000"), StorageKeys: []common.Hash{
                common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000001")}},
        },
        {
            {Address: common.HexToAddress("0x2000000000000000000000000000000000000000"), StorageKeys: []common.Hash{
                common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000002"),
                common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000003"),
            }},
            {Address: common.HexToAddress("0x3000000000000000000000000000000000000000"), StorageKeys: []common.Hash{
                common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000004"),
            }},
        },
        {
            {Address: common.HexToAddress("0x4000000000000000000000000000000000000000"), StorageKeys: []common.Hash{
                common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000005"),
                common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000005"), // repetitive storage key
            }},
        },
    }

    for i := 0; i < 50; i++ {
        for _, txType := range txTypes {
            for _, accessList := range accessLists {
                if err := sendTransaction(client, auth, txType, &l2TestCurieOpcodesAddr, nonce, accessList, nil, useTloadTstoreCalldata); err != nil {
                    log.Crit("failed to send transaction", "nonce", nonce, "err", err)
                }
                nonce += 1

                if err := sendTransaction(client, auth, txType, &l2TestCurieOpcodesAddr, nonce, accessList, nil, useMcopyCalldata); err != nil {
                    log.Crit("failed to send transaction", "nonce", nonce, "err", err)
                }
                nonce += 1

                if err := sendTransaction(client, auth, txType, &l2TestCurieOpcodesAddr, nonce, accessList, nil, useBaseFee); err != nil {
                    log.Crit("failed to send transaction", "nonce", nonce, "err", err)
                }
                nonce += 1

                if err := sendTransaction(client, auth, txType, &fromAddress, nonce, accessList, nil, []byte{0x01, 0x02, 0x03, 0x04}); err != nil {
                    log.Crit("failed to send transaction", "nonce", nonce, "err", err)
                }
                nonce += 1

                if err := sendTransaction(client, auth, txType, &fromAddress, nonce, accessList, new(big.Int).SetUint64(1), []byte{0x01, 0x02, 0x03, 0x04}); err != nil {
                    log.Crit("failed to send transaction", "nonce", nonce, "err", err)
                }
                nonce += 1

                if err := sendTransaction(client, auth, txType, &fromAddress, nonce, accessList, new(big.Int).SetUint64(1), nil); err != nil {
                    log.Crit("failed to send transaction", "nonce", nonce, "err", err)
                }
                nonce += 1
            }
        }
    }
}

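// NOTE (editor): these are script-local selectors for the switch in sendTransaction
// below; they intentionally differ from go-ethereum's on-wire transaction type codes
// (types.LegacyTxType = 0x0, types.AccessListTxType = 0x1, types.DynamicFeeTxType = 0x2).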
const (
    LegacyTxType     = 1
    AccessListTxType = 2
    DynamicFeeTxType = 3
)

func sendTransaction(client *ethclient.Client, auth *bind.TransactOpts, txType int, to *common.Address, nonce uint64, accessList types.AccessList, value *big.Int, data []byte) error {
    var txData types.TxData
    switch txType {
    case LegacyTxType:
        txData = &types.LegacyTx{
            Nonce:    nonce,
            GasPrice: new(big.Int).SetUint64(1000000000),
            Gas:      300000,
            To:       to,
            Value:    value,
            Data:     data,
        }
    case AccessListTxType:
        txData = &types.AccessListTx{
            ChainID:    new(big.Int).SetUint64(222222),
            Nonce:      nonce,
            GasPrice:   new(big.Int).SetUint64(1000000000),
            Gas:        300000,
            To:         to,
            Value:      value,
            Data:       data,
            AccessList: accessList,
        }
    case DynamicFeeTxType:
        txData = &types.DynamicFeeTx{
            ChainID:    new(big.Int).SetUint64(222222),
            Nonce:      nonce,
            GasTipCap:  new(big.Int).SetUint64(1000000000),
            GasFeeCap:  new(big.Int).SetUint64(1000000000),
            Gas:        300000,
            To:         to,
            Value:      value,
            Data:       data,
            AccessList: accessList,
        }
    default:
        return fmt.Errorf("invalid transaction type: %d", txType)
    }

    signedTx, err := auth.Signer(auth.From, types.NewTx(txData))
    if err != nil {
        return fmt.Errorf("failed to sign tx: %w", err)
    }

    if err = client.SendTransaction(context.Background(), signedTx); err != nil {
        return fmt.Errorf("failed to send tx: %w", err)
    }

    log.Info("transaction sent", "txHash", signedTx.Hash().Hex())

    var receipt *types.Receipt
    for {
        receipt, err = client.TransactionReceipt(context.Background(), signedTx.Hash())
        if err == nil {
            if receipt.Status != types.ReceiptStatusSuccessful {
                return fmt.Errorf("transaction failed: %s", signedTx.Hash().Hex())
            }
            break
        }
        log.Warn("waiting for receipt", "txHash", signedTx.Hash())
        time.Sleep(2 * time.Second)
    }

    log.Info("Sent transaction", "txHash", signedTx.Hash().Hex(), "from", auth.From.Hex(), "nonce", signedTx.Nonce(), "to", to.Hex())
    return nil
}
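
The receipt loop in sendTransaction polls forever if a transaction is never mined. A bounded variant is sketched below using the same imports as the script; waitForReceipt is a hypothetical helper, not part of the committed code:

```go
// waitForReceipt polls for a receipt like the loop above, but gives up
// after the supplied timeout instead of spinning indefinitely.
func waitForReceipt(client *ethclient.Client, txHash common.Hash, timeout time.Duration) (*types.Receipt, error) {
    deadline := time.Now().Add(timeout)
    for time.Now().Before(deadline) {
        receipt, err := client.TransactionReceipt(context.Background(), txHash)
        if err == nil {
            return receipt, nil
        }
        time.Sleep(2 * time.Second)
    }
    return nil, fmt.Errorf("timed out waiting for receipt: %s", txHash.Hex())
}
```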
@@ -137,6 +137,7 @@ github.com/Azure/go-autorest/autorest/mocks v0.4.1/go.mod h1:LTp+uSrOhSkaKrUy935
github.com/Azure/go-autorest/logger v0.2.1/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZmbF5NWuPV8+WeEW8=
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/AzureAD/microsoft-authentication-library-for-go v0.6.0/go.mod h1:BDJ5qMFKx9DugEg3+uQSDCdbYPr5s9vBTrL9P8TpqOU=
github.com/BurntSushi/toml v1.3.2 h1:o7IhLm0Msx3BaB+n3Ag7L8EVlByGnpq14C4YWiu/gL8=
github.com/BurntSushi/toml v1.3.2/go.mod h1:CxXYINrC8qIiEnFrOxCa7Jy5BFHlXnUU2pbicEuybxQ=
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
github.com/NYTimes/gziphandler v1.1.1/go.mod h1:n/CVRwUEOgIxrgPvAQhUUr9oeUtvrhMomdKFjzJNB0c=
@@ -261,6 +262,7 @@ github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.m
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/go-control-plane v0.11.1/go.mod h1:uhMcXKCQMEJHiAb0w+YGefQLaTEw+YhGluxZkrTmD0g=
github.com/ethereum/go-ethereum v1.10.26 h1:i/7d9RBBwiXCEuyduBQzJw/mKmnvzsN14jqBmytw72s=
github.com/ethereum/go-ethereum v1.10.26/go.mod h1:EYFyF19u3ezGLD4RqOkLq+ZCXzYbLoNDdZlMt7kyKFg=
github.com/evanphx/json-patch v4.12.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
@@ -319,6 +321,7 @@ github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1C
github.com/jmhodges/clock v1.2.0/go.mod h1:qKjhA7x7u/lQpPB1XAqX1b1lCI/w3/fNuYpI/ZjLynI=
github.com/josephspurrier/goversioninfo v1.4.0/go.mod h1:JWzv5rKQr+MmW+LvM412ToT/IkYDZjaclF2pKDss8IY=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/karalabe/usb v0.0.0-20211005121534-4c5740d64559/go.mod h1:Od972xHfMJowv7NGVDiWVxk2zxnWgjLlJzE+F4F7AGU=
github.com/kisielk/sqlstruct v0.0.0-20201105191214-5f3e10d3ab46/go.mod h1:yyMNCyc/Ib3bDTKd379tNMpB/7/H5TjM2Y9QJ5THLbE=
@@ -378,8 +381,8 @@ github.com/sclevine/agouti v3.0.0+incompatible h1:8IBJS6PWz3uTlMP3YBIR5f+KAldcGu
github.com/sclevine/agouti v3.0.0+incompatible/go.mod h1:b4WX9W9L1sfQKXeJf1mUTLZKJ48R1S7H23Ji7oFO5Bw=
github.com/scroll-tech/da-codec v0.0.0-20240515170552-2e5286688fce h1:SN43TBs7VaJt9q737eWWqGz0OCg4v+PtUn3RbJcG1o0=
github.com/scroll-tech/da-codec v0.0.0-20240515170552-2e5286688fce/go.mod h1:1wWYii0OPwd5kw+xrz0PFgS420xNadrNF1x/ELJT+TM=
github.com/scroll-tech/da-codec v0.0.0-20240605080813-32bfc9fccde7 h1:CDrPMqifvAVyYqu0x1J5qickVV0b51tApPnOwDYLESI=
github.com/scroll-tech/da-codec v0.0.0-20240605080813-32bfc9fccde7/go.mod h1:1wWYii0OPwd5kw+xrz0PFgS420xNadrNF1x/ELJT+TM=
github.com/scroll-tech/da-codec v0.0.0-20240524071411-769db9f7e703 h1:dcvPPyyfe3SocOBwgww3e1wcWjgF85kKDsohY4TXII0=
github.com/scroll-tech/da-codec v0.0.0-20240524071411-769db9f7e703/go.mod h1:1wWYii0OPwd5kw+xrz0PFgS420xNadrNF1x/ELJT+TM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221202061207-804e7edc23ba/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221213034543-78c1f57fcfea/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
@@ -515,6 +518,7 @@ google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11
google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v1.32.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8=
gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k=
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns=
gopkg.in/square/go-jose.v2 v2.5.1/go.mod h1:M9dMgbHiYLoDGQrXy7OpJDJWiKiU//h+vD76mk0e1AI=

@@ -3,8 +3,8 @@
package core

/*
-#cgo LDFLAGS: -lzkp -lm -ldl -lzktrie -L${SRCDIR}/lib/ -Wl,-rpath=${SRCDIR}/lib
-#cgo gpu LDFLAGS: -lzkp -lm -ldl -lgmp -lstdc++ -lprocps -lzktrie -L/usr/local/cuda/lib64/ -lcudart -L${SRCDIR}/lib/ -Wl,-rpath=${SRCDIR}/lib
+#cgo LDFLAGS: -lzkp -lm -ldl -lzktrie -L${SRCDIR}/lib/ -Wl,-rpath,${SRCDIR}/lib
+#cgo gpu LDFLAGS: -lzkp -lm -ldl -lgmp -lstdc++ -lprocps -lzktrie -L/usr/local/cuda/lib64/ -lcudart -L${SRCDIR}/lib/ -Wl,-rpath,${SRCDIR}/lib
#include <stdlib.h>
#include "./lib/libzkp.h"
*/

5678 prover_rust/Cargo.lock generated
File diff suppressed because it is too large
@@ -1,47 +0,0 @@
[package]
name = "prover_rust"
version = "0.1.0"
edition = "2021"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html


[patch.crates-io]
ethers-signers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
halo2curves = { git = "https://github.com/scroll-tech/halo2curves", branch = "v0.1.0" }
[patch."https://github.com/privacy-scaling-explorations/halo2.git"]
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" }
[patch."https://github.com/privacy-scaling-explorations/poseidon.git"]
poseidon = { git = "https://github.com/scroll-tech/poseidon.git", branch = "main" }
[patch."https://github.com/privacy-scaling-explorations/bls12_381"]
bls12_381 = { git = "https://github.com/scroll-tech/bls12_381", branch = "feat/impl_scalar_field" }


[dependencies]
anyhow = "1.0"
log = "0.4"
env_logger = "0.11.3"
serde = { version = "1.0.198", features = ["derive"] }
serde_json = "1.0.116"
futures = "0.3.30"

ethers-core = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
ethers-providers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "v1.1" }
snark-verifier-sdk = { git = "https://github.com/scroll-tech/snark-verifier", branch = "develop", default-features = false, features = ["loader_halo2", "loader_evm", "halo2-pse"] }
prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", branch = "v0.10", default-features = false, features = ["parallel_syn", "scroll", "shanghai"] }
prover_next = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.11.0rc8-hotfix1", package = "prover", default-features = false, features = ["parallel_syn", "scroll"] }
base64 = "0.13.1"
reqwest = { version = "0.12.4", features = ["gzip"] }
reqwest-middleware = "0.3"
reqwest-retry = "0.5"
once_cell = "1.19.0"
hex = "0.4.3"
tiny-keccak = { version = "2.0.0", features = ["sha3", "keccak"] }
rand = "0.8.5"
eth-keystore = "0.5.0"
rlp = "0.5.2"
tokio = "1.37.0"
sled = "0.34.7"
http = "1.1.0"
clap = { version = "4.5", features = ["derive"] }
@@ -1,43 +0,0 @@
.PHONY: prover

ifeq (4.3,$(firstword $(sort $(MAKE_VERSION) 4.3)))
$(info ************ First ************)
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ./Cargo.lock | cut -d "#" -f2 | cut -c-7)
else
$(info ************ Second ************)
HALO2_VERSION=$(shell grep -m 1 "halo2.git" ./Cargo.lock | cut -d "\#" -f2 | cut -c-7)
endif

ZKEVM_VERSION=$(shell ./print_high_zkevm_version.sh)
ifeq (${ZKEVM_VERSION},)
$(error ZKEVM_VERSION not set)
else
$(info ZKEVM_VERSION is ${ZKEVM_VERSION})
endif

ZKEVM_COMMIT=$(shell echo ${ZKEVM_VERSION} | cut -d " " -f2)
$(info ZKEVM_COMMIT is ${ZKEVM_COMMIT})

HALO2_GPU_VERSION=$(shell ./print_halo2gpu_version.sh | sed -n '2p')

GIT_REV=$(shell git rev-parse --short HEAD)
GO_TAG=$(shell grep "var tag = " ../common/version/version.go | cut -d "\"" -f2)

ifeq (${GO_TAG},)
$(error GO_TAG not set)
else
$(info GO_TAG is ${GO_TAG})
endif

ifeq (${HALO2_GPU_VERSION},)
# use halo2_proofs with CPU
ZK_VERSION=${ZKEVM_COMMIT}-${HALO2_VERSION}
else
# use halo2_gpu
ZK_VERSION=${ZKEVM_COMMIT}-${HALO2_GPU_VERSION}
endif

prover:
	GO_TAG=${GO_TAG} GIT_REV=${GIT_REV} ZK_VERSION=${ZK_VERSION} cargo build --release
	rm -rf ./lib && mkdir ./lib
	find target/ -name "libzktrie.so" | xargs -I{} cp {} ./lib
@@ -1,26 +0,0 @@
{
    "prover_name": "prover-1",
    "keystore_path": "keystore.json",
    "keystore_password": "prover-pwd",
    "db_path": "unique-db-path-for-prover-1",
    "proof_type": 2,
    "low_version_circuit": {
        "hard_fork_name": "bernoulli",
        "params_path": "params",
        "assets_path": "assets"
    },
    "high_version_circuit": {
        "hard_fork_name": "curie",
        "params_path": "params",
        "assets_path": "assets"
    },
    "coordinator": {
        "base_url": "http://localhost:8555",
        "retry_count": 10,
        "retry_wait_time_sec": 10,
        "connection_timeout_sec": 30
    },
    "l2geth": {
        "endpoint": "http://localhost:9999"
    }
}
@@ -1,21 +0,0 @@
#!/bin/bash

config_file="$HOME/.cargo/config"

if [ ! -e "$config_file" ]; then
    exit 0
fi

if [[ $(head -n 1 "$config_file") == "#"* ]]; then
    exit 0
fi

halo2gpu_path=$(grep -Po '(?<=paths = \[")([^"]*)' $config_file)

pushd $halo2gpu_path

commit_hash=$(git log --pretty=format:%h -n 1)
echo "${commit_hash:0:7}"

popd

@@ -1,9 +0,0 @@


higher_zkevm_item=`grep "zkevm-circuits.git" ./Cargo.lock | sort | uniq | awk -F "[#=]" '{print $3" "$4}' | sort -k 1 | tail -n 1`

higher_version=`echo $higher_zkevm_item | awk '{print $1}'`

higher_commit=`echo $higher_zkevm_item | cut -d ' ' -f2 | cut -c-7`

echo "$higher_version $higher_commit"
@@ -1 +0,0 @@
nightly-2023-12-03
@@ -1,9 +0,0 @@
edition = "2021"

comment_width = 100
imports_granularity = "Crate"
max_width = 100
newline_style = "Unix"
# normalize_comments = true
reorder_imports = true
wrap_comments = true
@@ -1,89 +0,0 @@
use serde::{Deserialize, Serialize};
use std::fs::File;
use anyhow::{bail, Result};

use crate::types::ProofType;

#[derive(Debug, Serialize, Deserialize)]
pub struct CircuitConfig {
    pub hard_fork_name: String,
    pub params_path: String,
    pub assets_path: String,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct CoordinatorConfig {
    pub base_url: String,
    pub retry_count: u32,
    pub retry_wait_time_sec: u64,
    pub connection_timeout_sec: u64,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct L2GethConfig {
    pub endpoint: String,
}

#[derive(Debug, Serialize, Deserialize)]
pub struct Config {
    pub prover_name: String,
    pub keystore_path: String,
    pub keystore_password: String,
    pub db_path: String,
    #[serde(default)]
    pub proof_type: ProofType,
    pub low_version_circuit: CircuitConfig,
    pub high_version_circuit: CircuitConfig,
    pub coordinator: CoordinatorConfig,
    pub l2geth: Option<L2GethConfig>,
}

impl Config {
    pub fn from_reader<R>(reader: R) -> Result<Self>
    where
        R: std::io::Read,
    {
        serde_json::from_reader(reader).map_err(|e| anyhow::anyhow!(e))
    }

    pub fn from_file(file_name: String) -> Result<Self> {
        let file = File::open(file_name)?;
        Config::from_reader(&file)
    }
}

static SCROLL_PROVER_ASSETS_DIR_ENV_NAME: &str = "SCROLL_PROVER_ASSETS_DIR";
static mut SCROLL_PROVER_ASSETS_DIRS: Vec<String> = vec![];

#[derive(Debug)]
pub struct AssetsDirEnvConfig {}

impl AssetsDirEnvConfig {
    pub fn init() -> Result<()> {
        let value = std::env::var(SCROLL_PROVER_ASSETS_DIR_ENV_NAME)?;
        let dirs: Vec<&str> = value.split(',').collect();
        if dirs.len() != 2 {
            bail!("env variable SCROLL_PROVER_ASSETS_DIR value must be 2 parts seperated by comma.")
        }
        unsafe {
            SCROLL_PROVER_ASSETS_DIRS = dirs.into_iter().map(|s| s.to_string()).collect();
            log::info!("init SCROLL_PROVER_ASSETS_DIRS: {:?}", SCROLL_PROVER_ASSETS_DIRS);
        }
        Ok(())
    }

    pub fn enable_first() {
        unsafe {
            log::info!("set env {SCROLL_PROVER_ASSETS_DIR_ENV_NAME} to {}", &SCROLL_PROVER_ASSETS_DIRS[0]);
            std::env::set_var(SCROLL_PROVER_ASSETS_DIR_ENV_NAME, &SCROLL_PROVER_ASSETS_DIRS[0]);
        }
    }

    pub fn enable_second() {
        unsafe {
            log::info!("set env {SCROLL_PROVER_ASSETS_DIR_ENV_NAME} to {}", &SCROLL_PROVER_ASSETS_DIRS[1]);
            std::env::set_var(SCROLL_PROVER_ASSETS_DIR_ENV_NAME, &SCROLL_PROVER_ASSETS_DIRS[1]);
        }
    }
}
@@ -1,135 +0,0 @@
mod api;
mod errors;
pub mod listener;
pub mod types;

use anyhow::{bail, Context, Ok, Result};
use std::rc::Rc;

use api::API;
use errors::*;
use listener::Listener;
use log;
use tokio::runtime::Runtime;
use types::*;

use crate::key_signer::KeySigner;
use crate::config::Config;

pub struct CoordinatorClient<'a> {
    api: API,
    token: Option<String>,
    config: &'a Config,
    key_signer: Rc<KeySigner>,
    rt: Runtime,
    listener: Box<dyn Listener>,
}

impl<'a> CoordinatorClient<'a> {
    pub fn new(
        config: &'a Config,
        key_signer: Rc<KeySigner>,
        listener: Box<dyn Listener>,
    ) -> Result<Self> {
        let rt = tokio::runtime::Builder::new_current_thread()
            .enable_all()
            .build()?;

        let api = API::new(&config.coordinator.base_url,
            core::time::Duration::from_secs(config.coordinator.connection_timeout_sec),
            config.coordinator.retry_count,
            config.coordinator.retry_wait_time_sec
        )?;
        let mut client = Self {
            api,
            token: None,
            config,
            key_signer,
            rt,
            listener,
        };
        client.login()?;
        Ok(client)
    }

    fn login(&mut self) -> Result<()> {
        let api = &self.api;
        let challenge_response = self.rt.block_on(api.challenge())?;
        if challenge_response.errcode != ErrorCode::Success {
            bail!("challenge failed: {}", challenge_response.errmsg)
        }
        let mut token: String;
        if let Some(r) = challenge_response.data {
            token = r.token;
        } else {
            bail!("challenge failed: got empty token")
        }

        let login_message = LoginMessage {
            challenge: token.clone(),
            prover_name: self.config.prover_name.clone(),
            prover_version: crate::version::get_version(),
        };

        let buffer = login_message.rlp();
        let signature = self.key_signer.sign_buffer(&buffer)?;
        let login_request = LoginRequest {
            message: login_message,
            signature,
        };
        let login_response = self.rt.block_on(api.login(&login_request, &token))?;
        if login_response.errcode != ErrorCode::Success {
            bail!("login failed: {}", login_response.errmsg)
        }
        if let Some(r) = login_response.data {
            token = r.token;
        } else {
            bail!("login failed: got empty token")
        }
        self.token = Some(token);
        Ok(())
    }

    fn action_with_re_login<T, F, R>(&mut self, req: &R, mut f: F) -> Result<Response<T>>
    where
        F: FnMut(&mut Self, &R) -> Result<Response<T>>,
    {
        let response = f(self, req)?;
        if response.errcode == ErrorCode::ErrJWTTokenExpired {
            log::info!("JWT expired, attempting to re-login");
            self.login().context("JWT expired, re-login failed")?;
            log::info!("re-login success");
            return self.action_with_re_login(req, f);
        } else if response.errcode != ErrorCode::Success {
            bail!("action failed: {}", response.errmsg)
        }
        Ok(response)
    }

    fn do_get_task(&mut self, req: &GetTaskRequest) -> Result<Response<GetTaskResponseData>> {
        self.rt
            .block_on(self.api.get_task(req, self.token.as_ref().unwrap()))
    }

    pub fn get_task(&mut self, req: &GetTaskRequest) -> Result<Response<GetTaskResponseData>> {
        self.action_with_re_login(req, |s, req| s.do_get_task(req))
    }

    fn do_submit_proof(
        &mut self,
        req: &SubmitProofRequest,
    ) -> Result<Response<SubmitProofResponseData>> {
        let response = self
            .rt
            .block_on(self.api.submit_proof(req, &self.token.as_ref().unwrap()))?;
        self.listener.on_proof_submitted(req);
        Ok(response)
    }

    pub fn submit_proof(
        &mut self,
        req: &SubmitProofRequest,
    ) -> Result<Response<SubmitProofResponseData>> {
        self.action_with_re_login(req, |s, req| s.do_submit_proof(req))
    }
}
@@ -1,121 +0,0 @@
use super::types::*;
use anyhow::{bail, Result};
use reqwest::{header::CONTENT_TYPE, Url};
use serde::Serialize;
use core::time::Duration;
use reqwest_middleware::{ClientBuilder, ClientWithMiddleware};
use reqwest_retry::{RetryTransientMiddleware, policies::ExponentialBackoff};

pub struct API {
    url_base: Url,
    send_timeout: Duration,
    pub client: ClientWithMiddleware,
}

impl API {
    pub fn new(url_base: &String, send_timeout: Duration, retry_count: u32, retry_wait_time_sec: u64) -> Result<Self> {
        let retry_wait_duration = core::time::Duration::from_secs(retry_wait_time_sec);
        let retry_policy = ExponentialBackoff::builder()
            .retry_bounds(retry_wait_duration / 2, retry_wait_duration)
            .build_with_max_retries(retry_count);

        let client = ClientBuilder::new(reqwest::Client::new())
            .with(RetryTransientMiddleware::new_with_policy(retry_policy))
            .build();

        Ok(Self {
            url_base: Url::parse(&url_base)?,
            send_timeout,
            client,
        })
    }

    pub async fn challenge(&self) -> Result<Response<ChallengeResponseData>> {
        let method = "/coordinator/v1/challenge";
        let url = self.build_url(method)?;

        let response = self
            .client
            .get(url)
            .header(CONTENT_TYPE, "application/json")
            .timeout(self.send_timeout)
            .send()
            .await?;

        let response_body = response.text().await?;

        serde_json::from_str(&response_body).map_err(|e| anyhow::anyhow!(e))
    }

    pub async fn login(
        &self,
        req: &LoginRequest,
        token: &String,
    ) -> Result<Response<LoginResponseData>> {
        let method = "/coordinator/v1/login";
        self.post_with_token(&method, req, token).await
    }

    pub async fn get_task(
        &self,
        req: &GetTaskRequest,
        token: &String,
    ) -> Result<Response<GetTaskResponseData>> {
        let method = "/coordinator/v1/get_task";
        self.post_with_token(&method, req, token).await
    }

    pub async fn submit_proof(
        &self,
        req: &SubmitProofRequest,
        token: &String,
    ) -> Result<Response<SubmitProofResponseData>> {
        let method = "/coordinator/v1/submit_proof";
        self.post_with_token(&method, req, token).await
    }

    async fn post_with_token<Req, Resp>(
        &self,
        method: &str,
        req: &Req,
        token: &String,
    ) -> Result<Resp>
    where
        Req: ?Sized + Serialize,
        Resp: serde::de::DeserializeOwned,
    {
        let url = self.build_url(method)?;
        let request_body = serde_json::to_string(req)?;

        log::info!("[coordinator client], {method}, request: {request_body}");
        let response = self
            .client
            .post(url)
            .header(CONTENT_TYPE, "application/json")
            .bearer_auth(token)
            .body(request_body)
            .timeout(self.send_timeout)
            .send()
            .await?;

        if response.status() != http::status::StatusCode::OK {
            log::error!(
                "[coordinator client], {method}, status not ok: {}",
                response.status()
            );
            bail!(
                "[coordinator client], {method}, status not ok: {}",
                response.status()
            )
        }

        let response_body = response.text().await?;

        log::info!("[coordinator client], {method}, response: {response_body}");
        serde_json::from_str(&response_body).map_err(|e| anyhow::anyhow!(e))
    }

    fn build_url(&self, method: &str) -> Result<Url> {
        self.url_base.join(method).map_err(|e| anyhow::anyhow!(e))
    }
}
@@ -1,54 +0,0 @@
use serde::{Deserialize, Deserializer};
use log;

#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ErrorCode {
    Success,
    InternalServerError,

    ErrProverStatsAPIParameterInvalidNo,
    ErrProverStatsAPIProverTaskFailure,
    ErrProverStatsAPIProverTotalRewardFailure,

    ErrCoordinatorParameterInvalidNo,
    ErrCoordinatorGetTaskFailure,
    ErrCoordinatorHandleZkProofFailure,
    ErrCoordinatorEmptyProofData,

    ErrJWTCommonErr,
    ErrJWTTokenExpired,

    Undefined(i32),
}

impl ErrorCode {
    fn from_i32(v: i32) -> Self {
        match v {
            0 => ErrorCode::Success,
            500 => ErrorCode::InternalServerError,
            10001 => ErrorCode::ErrProverStatsAPIParameterInvalidNo,
            10002 => ErrorCode::ErrProverStatsAPIProverTaskFailure,
            10003 => ErrorCode::ErrProverStatsAPIProverTotalRewardFailure,
            20001 => ErrorCode::ErrCoordinatorParameterInvalidNo,
            20002 => ErrorCode::ErrCoordinatorGetTaskFailure,
            20003 => ErrorCode::ErrCoordinatorHandleZkProofFailure,
            20004 => ErrorCode::ErrCoordinatorEmptyProofData,
            50000 => ErrorCode::ErrJWTCommonErr,
            50001 => ErrorCode::ErrJWTTokenExpired,
            _ => {
                log::error!("get unexpected error code from coordinator: {v}");
                ErrorCode::Undefined(v)
            }
        }
    }
}

impl<'de> Deserialize<'de> for ErrorCode {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let v: i32 = i32::deserialize(deserializer)?;
        Ok(ErrorCode::from_i32(v))
    }
}
@@ -1,5 +0,0 @@
use super::SubmitProofRequest;

pub trait Listener {
    fn on_proof_submitted(&self, req: &SubmitProofRequest);
}
@@ -1,75 +0,0 @@
use crate::types::{ProofFailureType, ProofStatus};
use rlp::RlpStream;
use serde::{Deserialize, Serialize};
use super::errors::ErrorCode;

#[derive(Deserialize)]
pub struct Response<T> {
    pub errcode: ErrorCode,
    pub errmsg: String,
    pub data: Option<T>,
}

#[derive(Serialize, Deserialize)]
pub struct LoginMessage {
    pub challenge: String,
    pub prover_name: String,
    pub prover_version: String,
}

impl LoginMessage {
    pub fn rlp(&self) -> Vec<u8> {
        let mut rlp = RlpStream::new();
        let num_fields = 3;
        rlp.begin_list(num_fields);
        rlp.append(&self.prover_name);
        rlp.append(&self.prover_version);
        rlp.append(&self.challenge);
        rlp.out().freeze().into()
    }
}

#[derive(Serialize, Deserialize)]
pub struct LoginRequest {
    pub message: LoginMessage,
    pub signature: String,
}

#[derive(Serialize, Deserialize)]
pub struct LoginResponseData {
    pub time: String,
    pub token: String,
}

pub type ChallengeResponseData = LoginResponseData;

#[derive(Default, Serialize, Deserialize)]
pub struct GetTaskRequest {
    pub task_type: crate::types::ProofType,
    pub prover_height: Option<u64>,
    pub vks: Vec<String>,
}

#[derive(Serialize, Deserialize)]
pub struct GetTaskResponseData {
    pub uuid: String,
    pub task_id: String,
    pub task_type: crate::types::ProofType,
    pub task_data: String,
    pub hard_fork_name: String,
}

#[derive(Serialize, Deserialize, Default)]
pub struct SubmitProofRequest {
    pub uuid: String,
    pub task_id: String,
    pub task_type: crate::types::ProofType,
    pub status: ProofStatus,
    pub proof: String,
    pub failure_type: Option<ProofFailureType>,
    pub failure_msg: Option<String>,
    pub hard_fork_name: String,
}

#[derive(Serialize, Deserialize)]
pub struct SubmitProofResponseData {}
@@ -1,124 +0,0 @@
use crate::types::CommonHash;
use anyhow::Result;
use ethers_core::types::BlockNumber;
use tokio::runtime::Runtime;

use ethers_core::types::{H256, U64};
use serde::{Deserialize, Serialize, de::DeserializeOwned};
use std::fmt::Debug;

use ethers_providers::{Http, Provider};

// ======================= types ============================

/// L2 block full trace, tracking the corresponding golang version.
///
/// The inner block_trace is a generic type whose real implementations
/// live in the two versions of the zkevm-circuits library.
///
/// The inner block_trace is missing some fields compared to the go version.
/// These fields are defined here for clarity although not used.
#[derive(Deserialize, Serialize, Default, Debug, Clone)]
pub struct BlockTrace<T> {
    #[serde(flatten)]
    pub block_trace: T,

    pub version: String,

    pub withdraw_trie_root: Option<CommonHash>,

    #[serde(rename = "mptwitness", default)]
    pub mpt_witness: Vec<u8>,
}

pub type TxHash = H256;

/// This struct tracks https://github.com/scroll-tech/go-ethereum/blob/0f0cd99f7a2e/core/types/block.go#Header.
/// The fields are not 100% the same as ethers_core::types::Block, so this may need to change
/// at some point; currently only the `number` field is required.
#[derive(Debug, Deserialize, Serialize, Default)]
pub struct Header {
    #[serde(flatten)]
    block: ethers_core::types::Block<TxHash>,
}

impl Header {
    pub fn get_number(&self) -> Option<U64> {
        self.block.number
    }
}

/// Serialize a type.
///
/// # Panics
///
/// If the type returns an error during serialization.
pub fn serialize<T: serde::Serialize>(t: &T) -> serde_json::Value {
    serde_json::to_value(t).expect("Types never fail to serialize.")
}

// ======================= geth client ============================

pub struct GethClient {
    id: String,
    provider: Provider<Http>,
    rt: Runtime,
}

impl GethClient {
    pub fn new(id: &str, api_url: &str) -> Result<Self> {
        let provider = Provider::<Http>::try_from(api_url)?;
        let rt = tokio::runtime::Builder::new_current_thread()
            .enable_all()
            .build()?;

        Ok(Self {
            id: id.to_string(),
            provider,
            rt,
        })
    }

    pub fn get_block_trace_by_hash<T>(&mut self, hash: &CommonHash) -> Result<BlockTrace<T>>
    where T: Serialize + DeserializeOwned + Debug + Send {
        log::info!(
            "{}: calling get_block_trace_by_hash, hash: {:#?}",
            self.id,
            hash
        );

        let trace_future = self
            .provider
            .request("scroll_getBlockTraceByNumberOrHash", [format!("{hash:#x}")]);

        let trace = self.rt.block_on(trace_future)?;
        Ok(trace)
    }

    pub fn header_by_number(&mut self, block_number: &BlockNumber) -> Result<Header> {
        log::info!(
            "{}: calling header_by_number, hash: {:#?}",
            self.id,
            block_number
        );

        let hash = serialize(block_number);
        let include_txs = serialize(&false);

        let trace_future = self
            .provider
            .request("eth_getBlockByNumber", [hash, include_txs]);

        let trace = self.rt.block_on(trace_future)?;
        Ok(trace)
    }

    pub fn block_number(&mut self) -> Result<BlockNumber> {
        log::info!("{}: calling block_number", self.id);

        let trace_future = self.provider.request("eth_blockNumber", ());

        let trace = self.rt.block_on(trace_future)?;
        Ok(trace)
    }
}
@@ -1,103 +0,0 @@
use std::path::Path;

use anyhow::Result;
use ethers_core::{
    k256::{
        ecdsa::{signature::hazmat::PrehashSigner, RecoveryId, Signature, SigningKey},
        elliptic_curve::{sec1::ToEncodedPoint, FieldBytes},
        PublicKey, Secp256k1, SecretKey,
    },
    types::Signature as EthSignature,
};

use ethers_core::types::{H256, U256};
use hex::ToHex;
use tiny_keccak::{Hasher, Keccak};

pub struct KeySigner {
    public_key: PublicKey,
    signer: SigningKey,
}

impl KeySigner {
    pub fn new(key_path: &str, passwd: &str) -> Result<Self> {
        let p = Path::new(key_path);

        let secret = if !p.exists() {
            log::info!("[key_signer] key_path not exists, create one");
            let dir = p.parent().unwrap();
            let name = p.file_name().and_then(|s| s.to_str());
            let mut rng = rand::thread_rng();
            let (secret, _) = eth_keystore::new(dir, &mut rng, passwd, name)?;
            secret
        } else {
            log::info!("[key_signer] key_path already exists, load it");
            eth_keystore::decrypt_key(key_path, passwd).map_err(|e| anyhow::anyhow!(e))?
        };

        let secret_key = SecretKey::from_bytes(secret.as_slice().into())?;

        let signer = SigningKey::from(secret_key.clone());

        Ok(Self {
            public_key: secret_key.public_key(),
            signer,
        })
    }

    pub fn get_public_key(&self) -> String {
        let v: Vec<u8> = Vec::from(self.public_key.to_encoded_point(true).as_bytes());
        buffer_to_hex(&v, false)
    }

    /// Signs the provided hash.
    pub fn sign_hash(&self, hash: H256) -> Result<EthSignature> {
        let signer = &self.signer as &dyn PrehashSigner<(Signature, RecoveryId)>;
        let (recoverable_sig, recovery_id) = signer.sign_prehash(hash.as_ref())?;

        let v = u8::from(recovery_id) as u64;

        let r_bytes: FieldBytes<Secp256k1> = recoverable_sig.r().into();
        let s_bytes: FieldBytes<Secp256k1> = recoverable_sig.s().into();
        let r = U256::from_big_endian(r_bytes.as_slice());
        let s = U256::from_big_endian(s_bytes.as_slice());

        Ok(EthSignature { r, s, v })
    }

    pub fn sign_buffer<T>(&self, buffer: &T) -> Result<String>
    where
        T: AsRef<[u8]>,
    {
        let pre_hash = keccak256(buffer);

        let hash = H256::from(pre_hash);
        let sig = self.sign_hash(hash)?;

        Ok(buffer_to_hex(&sig.to_vec(), true))
    }
}

fn buffer_to_hex<T>(buffer: &T, has_prefix: bool) -> String
where
    T: AsRef<[u8]>,
{
    if has_prefix {
        format!("0x{}", buffer.encode_hex::<String>())
    } else {
        buffer.encode_hex::<String>()
    }
}

/// Compute the Keccak-256 hash of input bytes.
///
/// Note that strings are interpreted as UTF-8 bytes.
pub fn keccak256<T: AsRef<[u8]>>(bytes: T) -> [u8; 32] {
    let mut output = [0u8; 32];

    let mut hasher = Keccak::v256();
    hasher.update(bytes.as_ref());
    hasher.finalize(&mut output);

    output
}
@@ -1,76 +0,0 @@
mod config;
mod coordinator_client;
mod geth_client;
mod key_signer;
mod prover;
mod task_cache;
mod task_processor;
mod types;
mod utils;
mod version;
mod zk_circuits_handler;

use clap::{Parser, ArgAction};
use anyhow::Result;
use config::{Config, AssetsDirEnvConfig};
use prover::Prover;
use std::rc::Rc;
use task_cache::{ClearCacheCoordinatorListener, TaskCache};
use task_processor::TaskProcessor;
use log;

/// Simple program to greet a person
#[derive(Parser, Debug)]
#[clap(disable_version_flag = true)]
struct Args {
    /// Path of config file
    #[arg(long = "config", default_value = "conf/config.json")]
    config_file: String,

    /// Version of this prover
    #[arg(short, long, action = ArgAction::SetTrue)]
    version: bool,

    /// Path of log file
    #[arg(long = "log.file")]
    log_file: Option<String>,
}

fn main() -> Result<(), Box<dyn std::error::Error>> {
    let args = Args::parse();

    if args.version {
        println!("version is {}", version::get_version());
        std::process::exit(0);
    }

    utils::log_init(args.log_file);

    let config: Config = Config::from_file(args.config_file)?;

    if let Err(e) = AssetsDirEnvConfig::init() {
        log::error!("AssetsDirEnvConfig init failed: {:#}", e);
        std::process::exit(-2);
    }

    let task_cache = Rc::new(TaskCache::new(&config.db_path)?);

    let coordinator_listener = Box::new(ClearCacheCoordinatorListener {
        task_cache: task_cache.clone(),
    });

    let prover = Prover::new(&config, coordinator_listener)?;

    log::info!("prover start successfully. name: {}, type: {:?}, publickey: {}, version: {}",
        config.prover_name,
        config.proof_type,
        prover.get_public_key(),
        version::get_version(),
    );

    let task_processor = TaskProcessor::new(&prover, task_cache);

    task_processor.start();

    Ok(())
}
@@ -1,177 +0,0 @@
use anyhow::{bail, Context, Error, Ok, Result};
use ethers_core::types::U64;

use std::{cell::RefCell, rc::Rc};
use log;

use crate::{
    config::Config,
    coordinator_client::{
        listener::Listener, types::*, CoordinatorClient,
    },
    geth_client::GethClient,
    key_signer::KeySigner,
    types::{ProofFailureType, ProofStatus, ProofType},
    zk_circuits_handler::{CircuitsHandler, CircuitsHandlerProvider},
};

use super::types::{ProofDetail, Task};

pub struct Prover<'a> {
    config: &'a Config,
    key_signer: Rc<KeySigner>,
    circuits_handler_provider: RefCell<CircuitsHandlerProvider<'a>>,
    coordinator_client: RefCell<CoordinatorClient<'a>>,
    geth_client: Option<Rc<RefCell<GethClient>>>,
}

impl<'a> Prover<'a> {
    pub fn new(config: &'a Config, coordinator_listener: Box<dyn Listener>) -> Result<Self> {
        let proof_type = config.proof_type;
        let keystore_path = &config.keystore_path;
        let keystore_password = &config.keystore_password;

        let key_signer = Rc::new(KeySigner::new(&keystore_path, &keystore_password)?);
        let coordinator_client = CoordinatorClient::new(
            config,
            Rc::clone(&key_signer),
            coordinator_listener,
        ).context("failed to create coordinator_client")?;

        let geth_client = if config.proof_type == ProofType::ProofTypeChunk {
            Some(Rc::new(RefCell::new(GethClient::new(
                &config.prover_name,
                &config.l2geth.as_ref().unwrap().endpoint,
            ).context("failed to create l2 geth_client")?)))
        } else {
            None
        };

        let provider = CircuitsHandlerProvider::new(
            proof_type,
            config,
            geth_client.clone(),
        ).context("failed to create circuits handler provider")?;

        let prover = Prover {
            config,
            key_signer: Rc::clone(&key_signer),
            circuits_handler_provider: RefCell::new(provider),
            coordinator_client: RefCell::new(coordinator_client),
            geth_client,
        };

        Ok(prover)
    }

    pub fn get_proof_type(&self) -> ProofType {
        self.config.proof_type
    }

    pub fn get_public_key(&self) -> String {
        self.key_signer.get_public_key()
    }

    pub fn fetch_task(&self) -> Result<Task> {
        log::info!("[prover] start to fetch_task");
        let mut req = GetTaskRequest {
            task_type: self.get_proof_type(),
            prover_height: None,
            vks: self.circuits_handler_provider.borrow().get_vks(),
        };

        if self.get_proof_type() == ProofType::ProofTypeChunk {
            let latest_block_number = self.get_latest_block_number_value()?;
            if let Some(v) = latest_block_number {
                if v.as_u64() == 0 {
                    bail!("omit to prove task of the genesis block")
                }
                req.prover_height = Some(v.as_u64());
            } else {
                log::error!("[prover] failed to fetch latest confirmed block number, got None");
                bail!("failed to fetch latest confirmed block number, got None")
            }
        }
        let resp = self.coordinator_client.borrow_mut().get_task(&req)?;

        match resp.data {
            Some(d) => Ok(Task::from(d)),
            None => {
                bail!("data of get_task empty, while error_code is success. there may be something wrong in response data or inner logic.")
            }
        }
    }

    pub fn prove_task(&self, task: &Task) -> Result<ProofDetail> {
        log::info!("[prover] start to prove_task, task id: {}", task.id);
        self.circuits_handler_provider.borrow_mut().clear_circuits_handler();
        if let Some(handler) = self.circuits_handler_provider.borrow_mut().get_circuits_handler(&task.hard_fork_name) {
            self.do_prove(task, handler)
        } else {
            log::error!("failed to get a circuit handler");
            bail!("failed to get a circuit handler")
        }
    }

    fn do_prove(&self, task: &Task, handler: &Box<dyn CircuitsHandler>) -> Result<ProofDetail> {
        let mut proof_detail = ProofDetail {
            id: task.id.clone(),
            proof_type: task.task_type,
            ..Default::default()
        };

        proof_detail.proof_data = handler.get_proof_data(task.task_type, task)?;
        Ok(proof_detail)
    }

    pub fn submit_proof(&self, proof_detail: ProofDetail, task: &Task) -> Result<()> {
        log::info!("[prover] start to submit_proof, task id: {}", proof_detail.id);

        let request = SubmitProofRequest {
            uuid: task.uuid.clone(),
            task_id: proof_detail.id,
            task_type: proof_detail.proof_type,
            status: ProofStatus::Ok,
            proof: proof_detail.proof_data,
            hard_fork_name: task.hard_fork_name.clone(),
            ..Default::default()
        };

        self.do_submit(&request)
    }

    pub fn submit_error(
        &self,
        task: &Task,
        failure_type: ProofFailureType,
        error: Error,
    ) -> Result<()> {
        log::info!("[prover] start to submit_error, task id: {}", task.id);
        let request = SubmitProofRequest {
            uuid: task.uuid.clone(),
            task_id: task.id.clone(),
            task_type: task.task_type,
            status: ProofStatus::Error,
            failure_type: Some(failure_type),
            failure_msg: Some(error.to_string()),
            hard_fork_name: task.hard_fork_name.clone(),
            ..Default::default()
        };
        self.do_submit(&request)
    }

    fn do_submit(&self, request: &SubmitProofRequest) -> Result<()> {
        self.coordinator_client.borrow_mut().submit_proof(request)?;
        Ok(())
    }

    fn get_latest_block_number_value(&self) -> Result<Option<U64>> {
        let number = self
            .geth_client
            .as_ref()
            .unwrap()
            .borrow_mut()
            .block_number()?;
        Ok(number.as_number())
    }
}
@@ -1,61 +0,0 @@
use anyhow::{Ok, Result};

use super::coordinator_client::{listener::Listener, types::SubmitProofRequest};
use crate::types::TaskWrapper;
use sled::{Config, Db};
use std::rc::Rc;
use log;

pub struct TaskCache {
    db: Db,
}

impl TaskCache {
    pub fn new(db_path: &String) -> Result<Self> {
        let config = Config::new().path(db_path);
        let db = config.open()?;
        log::info!("[task_cache] initialized successfully at {db_path}");
        Ok(Self { db })
    }

    pub fn put_task(&self, task_wrapper: &TaskWrapper) -> Result<()> {
        let k = task_wrapper.task.id.clone().into_bytes();
        let v = serde_json::to_vec(task_wrapper)?;
        self.db.insert(k, v)?;
        log::info!("[task_cache] put_task with task_id: {}", task_wrapper.task.id);
        Ok(())
    }

    pub fn get_last_task(&self) -> Result<Option<TaskWrapper>> {
        let last = self.db.last()?;
        if let Some((k, v)) = last {
            let kk = std::str::from_utf8(k.as_ref())?;
            let task_wrapper: TaskWrapper = serde_json::from_slice(v.as_ref())?;
            log::info!("[task_cache] get_last_task with task_id: {kk}, count: {}", task_wrapper.get_count());
            return Ok(Some(task_wrapper));
        }
        Ok(None)
    }

    pub fn delete_task(&self, task_id: String) -> Result<()> {
        let k = task_id.clone().into_bytes();
        self.db.remove(k)?;
        log::info!("[task_cache] delete_task with task_id: {task_id}");
        Ok(())
    }
}

// ========================= listener ===========================

pub struct ClearCacheCoordinatorListener {
    pub task_cache: Rc<TaskCache>,
}

impl Listener for ClearCacheCoordinatorListener {
    fn on_proof_submitted(&self, req: &SubmitProofRequest) {
        let result = self.task_cache.delete_task(req.task_id.clone());
        if let Err(e) = result {
            log::error!("failed to delete task from embedded db, {:#}", e);
        }
    }
}
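Review note: sled keeps keys in lexicographic byte order, so `get_last_task` effectively peeks the entry with the greatest task-id bytes. A hedged sketch of that stack-like behavior (path and ids hypothetical):

```rust
// Hypothetical sketch: the last-inserted id "002" sorts highest, so it is peeked first.
let cache = TaskCache::new(&"/tmp/task_cache_demo".to_string()).unwrap();
cache.put_task(&TaskWrapper::from(Task { id: "001".into(), ..Default::default() })).unwrap();
cache.put_task(&TaskWrapper::from(Task { id: "002".into(), ..Default::default() })).unwrap();
let last = cache.get_last_task().unwrap().expect("cache is non-empty");
assert_eq!(last.task.id, "002"); // lexicographically greatest key wins
cache.delete_task("002".to_string()).unwrap();
```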
@@ -1,81 +0,0 @@
use super::{prover::Prover, task_cache::TaskCache};
use anyhow::{Context, Result};
use log;
use std::rc::Rc;

pub struct TaskProcessor<'a> {
    prover: &'a Prover<'a>,
    task_cache: Rc<TaskCache>,
}

impl<'a> TaskProcessor<'a> {
    pub fn new(prover: &'a Prover<'a>, task_cache: Rc<TaskCache>) -> Self {
        TaskProcessor { prover, task_cache }
    }

    pub fn start(&self) {
        loop {
            log::info!("start a new round.");
            if let Err(err) = self.prove_and_submit() {
                log::error!("encounter error: {:#}", err);
            } else {
                log::info!("prove & submit succeed.");
            }
        }
    }

    fn prove_and_submit(&self) -> Result<()> {
        let task_from_cache = self
            .task_cache
            .get_last_task()
            .context("failed to peek from stack")?;

        let mut task_wrapper = match task_from_cache {
            Some(t) => t,
            None => {
                let fetch_result = self.prover.fetch_task();
                if let Err(err) = fetch_result {
                    std::thread::sleep(core::time::Duration::from_secs(10));
                    return Err(err).context("failed to fetch task from coordinator");
                }
                fetch_result.unwrap().into()
            }
        };

        if task_wrapper.get_count() <= 2 {
            task_wrapper.increment_count();
            self.task_cache
                .put_task(&task_wrapper)
                .context("failed to push task into stack, updating count")?;

            log::info!(
                "start to prove task, task_type: {:?}, task_id: {}",
                task_wrapper.task.task_type,
                task_wrapper.task.id
            );
            let result = match self.prover.prove_task(&task_wrapper.task) {
                Ok(proof_detail) => self
                    .prover
                    .submit_proof(proof_detail, &task_wrapper.task),
                Err(error) => self.prover.submit_error(
                    &task_wrapper.task,
                    super::types::ProofFailureType::NoPanic,
                    error,
                ),
            };
            return result;
        }

        // if tried times >= 3, it's probably due to circuit proving panic
        log::error!(
            "zk proving panic for task, task_type: {:?}, task_id: {}",
            task_wrapper.task.task_type,
            task_wrapper.task.id
        );
        self.prover.submit_error(
            &task_wrapper.task,
            super::types::ProofFailureType::Panic,
            anyhow::anyhow!("zk proving panic for task"),
        )
    }
}
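Review note: the `get_count() <= 2` guard bounds each task to three proving attempts; a small sketch of the classification (the helper is hypothetical):

```rust
// Hypothetical: counts 0..=2 trigger another attempt; anything higher is
// treated as a circuit panic and reported with ProofFailureType::Panic.
fn attempts_exhausted(count: usize) -> bool {
    count > 2
}
assert!(!attempts_exhausted(2)); // third attempt still allowed
assert!(attempts_exhausted(3));  // reported as a proving panic
```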
@@ -1,195 +0,0 @@
use ethers_core::types::H256;
use serde::{Deserialize, Deserializer, Serialize, Serializer};

use crate::coordinator_client::types::GetTaskResponseData;

pub type CommonHash = H256;

#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ProofType {
    ProofTypeUndefined,
    ProofTypeChunk,
    ProofTypeBatch,
}

impl ProofType {
    fn from_u8(v: u8) -> Self {
        match v {
            1 => ProofType::ProofTypeChunk,
            2 => ProofType::ProofTypeBatch,
            _ => ProofType::ProofTypeUndefined,
        }
    }
}

impl Serialize for ProofType {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        match *self {
            ProofType::ProofTypeUndefined => serializer.serialize_i8(0),
            ProofType::ProofTypeChunk => serializer.serialize_i8(1),
            ProofType::ProofTypeBatch => serializer.serialize_i8(2),
        }
    }
}

impl<'de> Deserialize<'de> for ProofType {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let v: u8 = u8::deserialize(deserializer)?;
        Ok(ProofType::from_u8(v))
    }
}

impl Default for ProofType {
    fn default() -> Self {
        Self::ProofTypeUndefined
    }
}

#[derive(Serialize, Deserialize, Default)]
pub struct Task {
    pub uuid: String,
    pub id: String,
    #[serde(rename = "type", default)]
    pub task_type: ProofType,
    pub task_data: String,
    #[serde(default)]
    pub hard_fork_name: String,
}

impl From<GetTaskResponseData> for Task {
    fn from(value: GetTaskResponseData) -> Self {
        Self {
            uuid: value.uuid,
            id: value.task_id,
            task_type: value.task_type,
            task_data: value.task_data,
            hard_fork_name: value.hard_fork_name,
        }
    }
}

#[derive(Serialize, Deserialize, Default)]
pub struct TaskWrapper {
    pub task: Task,
    count: usize,
}

impl TaskWrapper {
    pub fn increment_count(&mut self) {
        self.count += 1;
    }

    pub fn get_count(&self) -> usize {
        self.count
    }
}

impl From<Task> for TaskWrapper {
    fn from(task: Task) -> Self {
        TaskWrapper { task, count: 0 }
    }
}

#[derive(Serialize, Deserialize, Default)]
pub struct ProofDetail {
    pub id: String,
    #[serde(rename = "type", default)]
    pub proof_type: ProofType,
    pub proof_data: String,
    pub error: String,
}

#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ProofFailureType {
    Undefined,
    Panic,
    NoPanic,
}

impl ProofFailureType {
    fn from_u8(v: u8) -> Self {
        match v {
            1 => ProofFailureType::Panic,
            2 => ProofFailureType::NoPanic,
            _ => ProofFailureType::Undefined,
        }
    }
}

impl Serialize for ProofFailureType {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        match *self {
            ProofFailureType::Undefined => serializer.serialize_u8(0),
            ProofFailureType::Panic => serializer.serialize_u8(1),
            ProofFailureType::NoPanic => serializer.serialize_u8(2),
        }
    }
}

impl<'de> Deserialize<'de> for ProofFailureType {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let v: u8 = u8::deserialize(deserializer)?;
        Ok(ProofFailureType::from_u8(v))
    }
}

impl Default for ProofFailureType {
    fn default() -> Self {
        Self::Undefined
    }
}

#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ProofStatus {
    Ok,
    Error,
}

impl ProofStatus {
    fn from_u8(v: u8) -> Self {
        match v {
            0 => ProofStatus::Ok,
            _ => ProofStatus::Error,
        }
    }
}

impl Serialize for ProofStatus {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        match *self {
            ProofStatus::Ok => serializer.serialize_u8(0),
            ProofStatus::Error => serializer.serialize_u8(1),
        }
    }
}

impl<'de> Deserialize<'de> for ProofStatus {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let v: u8 = u8::deserialize(deserializer)?;
        Ok(ProofStatus::from_u8(v))
    }
}

impl Default for ProofStatus {
    fn default() -> Self {
        Self::Ok
    }
}
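Review note: `ProofType` crosses the wire as a bare integer and `Task`'s discriminator field is renamed to `type`; a hedged round-trip sketch (payload values hypothetical):

```rust
// Hypothetical payload mirroring the coordinator's JSON shape.
let json = r#"{"uuid":"u1","id":"42","type":2,"task_data":"{}"}"#;
let task: Task = serde_json::from_str(json).unwrap();
assert_eq!(task.task_type, ProofType::ProofTypeBatch); // 2 => batch
assert_eq!(task.hard_fork_name, "");                   // filled by #[serde(default)]
let out = serde_json::to_string(&task).unwrap();
assert!(out.contains(r#""type":2"#));                  // enum serialized as an integer
```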
@@ -1,24 +0,0 @@
use env_logger::Env;
use std::sync::Once;
use std::fs::OpenOptions;

static LOG_INIT: Once = Once::new();

/// Initialize log
pub fn log_init(log_file: Option<String>) {
    LOG_INIT.call_once(|| {
        let mut builder = env_logger::Builder::from_env(Env::default().default_filter_or("info"));
        if let Some(file_path) = log_file {
            let target = Box::new(
                OpenOptions::new()
                    .write(true)
                    .create(true)
                    .truncate(false)
                    .open(file_path)
                    .expect("Can't create log file"),
            );
            builder.target(env_logger::Target::Pipe(target));
        }
        builder.init();
    });
}
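Review note: `log_init` is idempotent through `Once`, and the filter is fixed at first initialization; a sketch of the expected behavior (file path hypothetical):

```rust
// First call wins; later calls are no-ops thanks to LOG_INIT.
log_init(Some("prover.log".to_string())); // pipe logs to a file, default filter "info"
log_init(None);                           // ignored: logger already initialized
log::info!("shown at the default filter");
log::debug!("hidden unless RUST_LOG=debug was set when log_init first ran");
```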
@@ -1,18 +0,0 @@
use std::cell::OnceCell;

static DEFAULT_COMMIT: &str = "unknown";
static mut VERSION: OnceCell<String> = OnceCell::new();

pub const TAG: &str = "v0.0.0";
pub const DEFAULT_ZK_VERSION: &str = "000000-000000";

fn init_version() -> String {
    let commit = option_env!("GIT_REV").unwrap_or(DEFAULT_COMMIT);
    let tag = option_env!("GO_TAG").unwrap_or(TAG);
    let zk_version = option_env!("ZK_VERSION").unwrap_or(DEFAULT_ZK_VERSION);
    format!("{tag}-{commit}-{zk_version}")
}

pub fn get_version() -> String {
    unsafe { VERSION.get_or_init(init_version).clone() }
}
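Review note: the version string concatenates the tag, commit, and zk version captured at compile time; a hedged sketch of the fallback output:

```rust
// With no GIT_REV / GO_TAG / ZK_VERSION set at build time, the defaults apply:
assert_eq!(get_version(), "v0.0.0-unknown-000000-000000");
// With, e.g., GIT_REV=abc1234 and GO_TAG=v1.2.3 (hypothetical values), the
// result would be "v1.2.3-abc1234-000000-000000".
```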
@@ -1,125 +0,0 @@
mod bernoulli;
mod curie;

use anyhow::Result;
use bernoulli::BaseCircuitsHandler;
use curie::NextCircuitsHandler;
use std::collections::HashMap;
use std::{cell::RefCell, rc::Rc};
use super::geth_client::GethClient;
use crate::{config::{Config, AssetsDirEnvConfig}, types::{ProofType, Task}};

type HardForkName = String;

pub mod utils {
    pub fn encode_vk(vk: Vec<u8>) -> String {
        base64::encode(vk)
    }
}

pub trait CircuitsHandler {
    fn get_vk(&self, task_type: ProofType) -> Option<Vec<u8>>;

    fn get_proof_data(&self, task_type: ProofType, task: &Task) -> Result<String>;
}

type CircuitsHandlerBuilder = fn(
    proof_type: ProofType,
    config: &Config,
    geth_client: Option<Rc<RefCell<GethClient>>>,
) -> Result<Box<dyn CircuitsHandler>>;

pub struct CircuitsHandlerProvider<'a> {
    proof_type: ProofType,
    config: &'a Config,
    geth_client: Option<Rc<RefCell<GethClient>>>,
    circuits_handler_builder_map: HashMap<HardForkName, CircuitsHandlerBuilder>,

    current_hard_fork_name: Option<HardForkName>,
    current_circuit: Option<Box<dyn CircuitsHandler>>,
    vks: Vec<String>,
}

impl<'a> CircuitsHandlerProvider<'a> {
    pub fn new(proof_type: ProofType, config: &'a Config, geth_client: Option<Rc<RefCell<GethClient>>>) -> Result<Self> {
        let mut m: HashMap<HardForkName, CircuitsHandlerBuilder> = HashMap::new();

        fn handler_builder(proof_type: ProofType, config: &Config, geth_client: Option<Rc<RefCell<GethClient>>>) -> Result<Box<dyn CircuitsHandler>> {
            log::info!("now init zk circuits handler, hard_fork_name: {}", &config.low_version_circuit.hard_fork_name);
            AssetsDirEnvConfig::enable_first();
            BaseCircuitsHandler::new(proof_type,
                &config.low_version_circuit.params_path,
                &config.low_version_circuit.assets_path,
                geth_client,
            ).map(|handler| Box::new(handler) as Box<dyn CircuitsHandler>)
        }
        m.insert(config.low_version_circuit.hard_fork_name.clone(), handler_builder);

        fn next_handler_builder(proof_type: ProofType, config: &Config, geth_client: Option<Rc<RefCell<GethClient>>>) -> Result<Box<dyn CircuitsHandler>> {
            log::info!("now init zk circuits handler, hard_fork_name: {}", &config.high_version_circuit.hard_fork_name);
            AssetsDirEnvConfig::enable_second();
            NextCircuitsHandler::new(proof_type,
                &config.high_version_circuit.params_path,
                &config.high_version_circuit.assets_path,
                geth_client,
            ).map(|handler| Box::new(handler) as Box<dyn CircuitsHandler>)
        }

        m.insert(config.high_version_circuit.hard_fork_name.clone(), next_handler_builder);

        let vks = CircuitsHandlerProvider::init_vks(proof_type, config, &m, geth_client.clone());

        let provider = CircuitsHandlerProvider {
            proof_type,
            config,
            geth_client,
            circuits_handler_builder_map: m,
            current_hard_fork_name: None,
            current_circuit: None,
            vks,
        };

        Ok(provider)
    }

    pub fn get_circuits_handler(&mut self, hard_fork_name: &String) -> Option<&Box<dyn CircuitsHandler>> {
        match &self.current_hard_fork_name {
            Some(name) if name == hard_fork_name => {
                log::info!("get circuits handler from cache");
                self.current_circuit.as_ref()
            },
            _ => {
                log::info!("no cached circuits handler, creating a new one: {hard_fork_name}");
                let builder = self.circuits_handler_builder_map.get(hard_fork_name);
                builder.and_then(|build| {
                    log::info!("building circuits handler for {hard_fork_name}");
                    let handler = build(self.proof_type, &self.config, self.geth_client.clone()).expect("failed to build circuits handler");
                    self.current_hard_fork_name = Some(hard_fork_name.clone());
                    self.current_circuit = Some(handler);
                    self.current_circuit.as_ref()
                })
            }
        }
    }

    pub fn clear_circuits_handler(&mut self) {
        log::info!("clearing the circuits handler cache (test-only)");
        self.current_hard_fork_name = None;
        self.current_circuit = None;
    }

    fn init_vks(
        proof_type: ProofType,
        config: &'a Config,
        circuits_handler_builder_map: &HashMap<HardForkName, CircuitsHandlerBuilder>,
        geth_client: Option<Rc<RefCell<GethClient>>>,
    ) -> Vec<String> {
        circuits_handler_builder_map
            .iter()
            .map(|(hard_fork_name, build)| {
                let handler = build(proof_type, config, geth_client.clone()).expect("failed to build circuits handler");
                let vk = handler.get_vk(proof_type)
                    .map_or("".to_string(), |vk| utils::encode_vk(vk));
                log::info!("vk for {hard_fork_name} is {vk}");
                vk
            })
            .collect::<Vec<String>>()
    }

    pub fn get_vks(&self) -> Vec<String> {
        self.vks.clone()
    }
}
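Review note: verifying keys are shipped to the coordinator base64-encoded; a tiny sketch pinning down `utils::encode_vk` (the three-byte vk is hypothetical):

```rust
// Bytes 0x01 0x02 0x03 base64-encode to "AQID".
assert_eq!(utils::encode_vk(vec![0x01, 0x02, 0x03]), "AQID");
```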
@@ -1,194 +0,0 @@
use super::CircuitsHandler;
use once_cell::sync::Lazy;
use crate::{geth_client::GethClient, types::ProofType};
use anyhow::{bail, Ok, Result};
use serde::Deserialize;

use crate::types::{Task, CommonHash};
use std::{cell::RefCell, env, cmp::Ordering, rc::Rc};
use prover::{aggregator::Prover as BatchProver, zkevm::Prover as ChunkProver};
use prover::{BlockTrace, ChunkHash, ChunkProof};

// Only used for debugging.
pub(crate) static OUTPUT_DIR: Lazy<Option<String>> =
    Lazy::new(|| env::var("PROVER_OUTPUT_DIR").ok());

#[derive(Deserialize)]
pub struct BatchTaskDetail {
    pub chunk_infos: Vec<ChunkHash>,
    pub chunk_proofs: Vec<ChunkProof>,
}

#[derive(Deserialize)]
pub struct ChunkTaskDetail {
    pub block_hashes: Vec<CommonHash>,
}

fn get_block_number(block_trace: &BlockTrace) -> Option<u64> {
    block_trace.header.number.map(|n| n.as_u64())
}

pub struct BaseCircuitsHandler {
    chunk_prover: Option<RefCell<ChunkProver>>,
    batch_prover: Option<RefCell<BatchProver>>,

    geth_client: Option<Rc<RefCell<GethClient>>>,
}

impl BaseCircuitsHandler {
    pub fn new(proof_type: ProofType, params_dir: &str, assets_dir: &str, geth_client: Option<Rc<RefCell<GethClient>>>) -> Result<Self> {
        match proof_type {
            ProofType::ProofTypeChunk => Ok(Self {
                chunk_prover: Some(RefCell::new(ChunkProver::from_dirs(params_dir, assets_dir))),
                batch_prover: None,
                geth_client,
            }),

            ProofType::ProofTypeBatch => Ok(Self {
                batch_prover: Some(RefCell::new(BatchProver::from_dirs(params_dir, assets_dir))),
                chunk_prover: None,
                geth_client,
            }),
            _ => bail!("proof type invalid"),
        }
    }

    fn gen_chunk_proof(&self, task: &crate::types::Task) -> Result<String> {
        let chunk_trace = self.gen_chunk_traces(task)?;
        if let Some(prover) = self.chunk_prover.as_ref() {
            let chunk_proof = prover
                .borrow_mut()
                .gen_chunk_proof(chunk_trace, None, None, self.get_output_dir())?;

            return serde_json::to_string(&chunk_proof).map_err(|e| anyhow::anyhow!(e));
        }
        unreachable!("please check errors in proof_type logic")
    }

    fn gen_batch_proof(&self, task: &crate::types::Task) -> Result<String> {
        let chunk_hashes_proofs: Vec<(ChunkHash, ChunkProof)> =
            self.gen_chunk_hashes_proofs(task)?;
        let chunk_proofs: Vec<ChunkProof> =
            chunk_hashes_proofs.iter().map(|t| t.1.clone()).collect();

        if let Some(prover) = self.batch_prover.as_ref() {
            let is_valid = prover
                .borrow_mut()
                .check_chunk_proofs(&chunk_proofs);

            if !is_valid {
                bail!("non-match chunk protocol, task-id: {}", &task.id)
            }
            let batch_proof = prover
                .borrow_mut()
                .gen_agg_evm_proof(
                    chunk_hashes_proofs,
                    None,
                    self.get_output_dir(),
                )?;

            return serde_json::to_string(&batch_proof).map_err(|e| anyhow::anyhow!(e));
        }
        unreachable!("please check errors in proof_type logic")
    }

    fn get_output_dir(&self) -> Option<&str> {
        OUTPUT_DIR.as_deref()
    }

    fn gen_chunk_traces(&self, task: &Task) -> Result<Vec<BlockTrace>> {
        let chunk_task_detail: ChunkTaskDetail = serde_json::from_str(&task.task_data)?;
        self.get_sorted_traces_by_hashes(&chunk_task_detail.block_hashes)
    }

    fn gen_chunk_hashes_proofs(&self, task: &Task) -> Result<Vec<(ChunkHash, ChunkProof)>> {
        let batch_task_detail: BatchTaskDetail = serde_json::from_str(&task.task_data)?;

        Ok(batch_task_detail
            .chunk_infos
            .clone()
            .into_iter()
            .zip(batch_task_detail.chunk_proofs.clone())
            .collect())
    }

    fn get_sorted_traces_by_hashes(
        &self,
        block_hashes: &Vec<CommonHash>,
    ) -> Result<Vec<BlockTrace>> {
        if block_hashes.is_empty() {
            log::error!("[prover] failed to get sorted traces: block_hashes are empty");
            bail!("block_hashes are empty")
        }

        let mut block_traces = Vec::new();
        for hash in block_hashes.iter() {
            let trace = self
                .geth_client
                .as_ref()
                .unwrap()
                .borrow_mut()
                .get_block_trace_by_hash(hash)?;
            block_traces.push(trace.block_trace);
        }

        block_traces.sort_by(|a, b| {
            if get_block_number(a).is_none() {
                Ordering::Less
            } else if get_block_number(b).is_none() {
                Ordering::Greater
            } else {
                get_block_number(a)
                    .unwrap()
                    .cmp(&get_block_number(b).unwrap())
            }
        });

        let block_numbers: Vec<u64> = block_traces
            .iter()
            .map(|trace| get_block_number(trace).unwrap_or(0))
            .collect();
        let mut i = 0;
        while i < block_numbers.len() - 1 {
            if block_numbers[i] + 1 != block_numbers[i + 1] {
                log::error!("[prover] block numbers are not continuous, got {} and {}", block_numbers[i], block_numbers[i + 1]);
                bail!(
                    "block numbers are not continuous, got {} and {}",
                    block_numbers[i],
                    block_numbers[i + 1]
                )
            }
            i += 1;
        }

        Ok(block_traces)
    }
}

impl CircuitsHandler for BaseCircuitsHandler {
    fn get_vk(&self, task_type: ProofType) -> Option<Vec<u8>> {
        match task_type {
            ProofType::ProofTypeChunk => self.chunk_prover.as_ref()
                .and_then(|prover| prover.borrow().get_vk()),
            ProofType::ProofTypeBatch => self.batch_prover.as_ref()
                .and_then(|prover| prover.borrow().get_vk()),
            _ => unreachable!(),
        }
    }

    fn get_proof_data(&self, task_type: ProofType, task: &crate::types::Task) -> Result<String> {
        match task_type {
            ProofType::ProofTypeChunk => self.gen_chunk_proof(task),
            ProofType::ProofTypeBatch => self.gen_batch_proof(task),
            _ => unreachable!(),
        }
    }
}
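Review note: the trailing while-loop enforces that the sorted block numbers increase by exactly one; a standalone sketch of the same invariant:

```rust
// Equivalent continuity predicate over already-sorted block numbers.
fn is_continuous(numbers: &[u64]) -> bool {
    numbers.windows(2).all(|w| w[0] + 1 == w[1])
}
assert!(is_continuous(&[100, 101, 102]));
assert!(!is_continuous(&[100, 102])); // gap: the chunk would be rejected
```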
@@ -1,198 +0,0 @@
use super::CircuitsHandler;
use crate::{geth_client::GethClient, types::ProofType};
use anyhow::{bail, Context, Ok, Result};
use serde::Deserialize;

use crate::types::{Task, CommonHash};
use std::{cell::RefCell, cmp::Ordering, rc::Rc};

use prover_next::{BlockTrace, ChunkInfo, ChunkProof, ChunkProvingTask, BatchProvingTask};
use prover_next::{aggregator::Prover as BatchProver, check_chunk_hashes, zkevm::Prover as ChunkProver};

use super::bernoulli::OUTPUT_DIR;

#[derive(Deserialize)]
pub struct BatchTaskDetail {
    pub chunk_infos: Vec<ChunkInfo>,
    pub chunk_proofs: Vec<ChunkProof>,
}

#[derive(Deserialize)]
pub struct ChunkTaskDetail {
    pub block_hashes: Vec<CommonHash>,
}

fn get_block_number(block_trace: &BlockTrace) -> Option<u64> {
    block_trace.header.number.map(|n| n.as_u64())
}

#[derive(Default)]
pub struct NextCircuitsHandler {
    chunk_prover: Option<RefCell<ChunkProver>>,
    batch_prover: Option<RefCell<BatchProver>>,

    geth_client: Option<Rc<RefCell<GethClient>>>,
}

impl NextCircuitsHandler {
    pub fn new(proof_type: ProofType, params_dir: &str, assets_dir: &str, geth_client: Option<Rc<RefCell<GethClient>>>) -> Result<Self> {
        match proof_type {
            ProofType::ProofTypeChunk => Ok(Self {
                chunk_prover: Some(RefCell::new(ChunkProver::from_dirs(params_dir, assets_dir))),
                batch_prover: None,
                geth_client,
            }),

            ProofType::ProofTypeBatch => Ok(Self {
                batch_prover: Some(RefCell::new(BatchProver::from_dirs(params_dir, assets_dir))),
                chunk_prover: None,
                geth_client,
            }),
            _ => bail!("proof type invalid"),
        }
    }

    fn gen_chunk_proof(&self, task: &crate::types::Task) -> Result<String> {
        let chunk_trace = self.gen_chunk_traces(task)?;
        if let Some(prover) = self.chunk_prover.as_ref() {
            let chunk = ChunkProvingTask::from(chunk_trace);

            let chunk_proof = prover
                .borrow_mut()
                .gen_chunk_proof(chunk, None, None, self.get_output_dir())?;

            return serde_json::to_string(&chunk_proof).map_err(|e| anyhow::anyhow!(e));
        }
        unreachable!("please check errors in proof_type logic")
    }

    fn gen_batch_proof(&self, task: &crate::types::Task) -> Result<String> {
        if let Some(prover) = self.batch_prover.as_ref() {
            let chunk_hashes_proofs: Vec<(ChunkInfo, ChunkProof)> =
                self.gen_chunk_hashes_proofs(task)?;
            let chunk_proofs: Vec<ChunkProof> =
                chunk_hashes_proofs.iter().map(|t| t.1.clone()).collect();

            let is_valid = prover
                .borrow_mut()
                .check_protocol_of_chunks(&chunk_proofs);

            if !is_valid {
                bail!("non-match chunk protocol, task-id: {}", &task.id)
            }
            check_chunk_hashes("", &chunk_hashes_proofs).context("failed to check chunk info")?;
            let batch = BatchProvingTask {
                chunk_proofs,
            };
            let batch_proof = prover
                .borrow_mut()
                .gen_agg_evm_proof(
                    batch,
                    None,
                    self.get_output_dir(),
                )?;

            return serde_json::to_string(&batch_proof).map_err(|e| anyhow::anyhow!(e));
        }
        unreachable!("please check errors in proof_type logic")
    }

    fn get_output_dir(&self) -> Option<&str> {
        OUTPUT_DIR.as_deref()
    }

    fn gen_chunk_traces(&self, task: &Task) -> Result<Vec<BlockTrace>> {
        let chunk_task_detail: ChunkTaskDetail = serde_json::from_str(&task.task_data)?;
        self.get_sorted_traces_by_hashes(&chunk_task_detail.block_hashes)
    }

    fn gen_chunk_hashes_proofs(&self, task: &Task) -> Result<Vec<(ChunkInfo, ChunkProof)>> {
        let batch_task_detail: BatchTaskDetail = serde_json::from_str(&task.task_data)?;

        Ok(batch_task_detail
            .chunk_infos
            .clone()
            .into_iter()
            .zip(batch_task_detail.chunk_proofs.clone())
            .collect())
    }

    fn get_sorted_traces_by_hashes(
        &self,
        block_hashes: &Vec<CommonHash>,
    ) -> Result<Vec<BlockTrace>> {
        if block_hashes.is_empty() {
            log::error!("[prover] failed to get sorted traces: block_hashes are empty");
            bail!("block_hashes are empty")
        }

        let mut block_traces = Vec::new();
        for hash in block_hashes.iter() {
            let trace = self
                .geth_client
                .as_ref()
                .unwrap()
                .borrow_mut()
                .get_block_trace_by_hash(hash)?;
            block_traces.push(trace.block_trace);
        }

        block_traces.sort_by(|a, b| {
            if get_block_number(a).is_none() {
                Ordering::Less
            } else if get_block_number(b).is_none() {
                Ordering::Greater
            } else {
                get_block_number(a)
                    .unwrap()
                    .cmp(&get_block_number(b).unwrap())
            }
        });

        let block_numbers: Vec<u64> = block_traces
            .iter()
            .map(|trace| get_block_number(trace).unwrap_or(0))
            .collect();
        let mut i = 0;
        while i < block_numbers.len() - 1 {
            if block_numbers[i] + 1 != block_numbers[i + 1] {
                log::error!("[prover] block numbers are not continuous, got {} and {}", block_numbers[i], block_numbers[i + 1]);
                bail!(
                    "block numbers are not continuous, got {} and {}",
                    block_numbers[i],
                    block_numbers[i + 1]
                )
            }
            i += 1;
        }

        Ok(block_traces)
    }
}

impl CircuitsHandler for NextCircuitsHandler {
    fn get_vk(&self, task_type: ProofType) -> Option<Vec<u8>> {
        match task_type {
            ProofType::ProofTypeChunk => self.chunk_prover.as_ref()
                .and_then(|prover| prover.borrow().get_vk()),
            ProofType::ProofTypeBatch => self.batch_prover.as_ref()
                .and_then(|prover| prover.borrow().get_vk()),
            _ => unreachable!(),
        }
    }

    fn get_proof_data(&self, task_type: ProofType, task: &crate::types::Task) -> Result<String> {
        match task_type {
            ProofType::ProofTypeChunk => self.gen_chunk_proof(task),
            ProofType::ProofTypeBatch => self.gen_batch_proof(task),
            _ => unreachable!(),
        }
    }
}
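Review note: `gen_chunk_hashes_proofs` pairs `chunk_infos[i]` with `chunk_proofs[i]` purely by position, so the coordinator must send both arrays in matching order; a sketch of that zip (element types hypothetical):

```rust
// Positional pairing, as in gen_chunk_hashes_proofs.
let infos = vec!["info_a", "info_b"];
let proofs = vec!["proof_a", "proof_b"];
let pairs: Vec<(&str, &str)> = infos.into_iter().zip(proofs).collect();
assert_eq!(pairs, [("info_a", "proof_a"), ("info_b", "proof_b")]);
```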
@@ -2,14 +2,29 @@

IMAGE_VERSION=latest
REPO_ROOT_DIR=./..
LIBSCROLL_ZSTD_VERSION=v0.0.0-rc0-ubuntu20.04
SCROLL_LIB_PATH=/scroll/lib

mock_abi:
	cd .. && solc --evm-version cancun --bin --abi --optimize --overwrite -o ./build/bin ./rollup/mock_bridge/MockBridge.sol
	cd .. && go run github.com/scroll-tech/go-ethereum/cmd/abigen --bin=./build/bin/MockBridge.bin --abi=./build/bin/MockBridge.abi --pkg=mock_bridge --out=./rollup/mock_bridge/MockBridge.go

rollup_bins: ## Builds the Rollup bins.
libzstd:
	sudo mkdir -p $(SCROLL_LIB_PATH)/
	sudo wget -O $(SCROLL_LIB_PATH)/libzktrie.so https://github.com/scroll-tech/da-codec/releases/download/v0.0.0-rc0-ubuntu20.04/libzktrie.so
	sudo wget -O $(SCROLL_LIB_PATH)/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/v0.0.0-rc0-ubuntu20.04/libscroll_zstd.so

rollup_bins: libzstd ## Builds the Rollup bins.
	export LD_LIBRARY_PATH=$$LD_LIBRARY_PATH:$(SCROLL_LIB_PATH) && \
	export CGO_LDFLAGS="-L$(SCROLL_LIB_PATH) -Wl,-rpath,$(SCROLL_LIB_PATH)" && \
	go build -o $(PWD)/build/bin/event_watcher ./cmd/event_watcher/

	export LD_LIBRARY_PATH=$$LD_LIBRARY_PATH:$(SCROLL_LIB_PATH) && \
	export CGO_LDFLAGS="-L$(SCROLL_LIB_PATH) -Wl,-rpath,$(SCROLL_LIB_PATH)" && \
	go build -o $(PWD)/build/bin/gas_oracle ./cmd/gas_oracle/

	export LD_LIBRARY_PATH=$$LD_LIBRARY_PATH:$(SCROLL_LIB_PATH) && \
	export CGO_LDFLAGS="-L$(SCROLL_LIB_PATH) -Wl,-rpath,$(SCROLL_LIB_PATH)" && \
	go build -o $(PWD)/build/bin/rollup_relayer ./cmd/rollup_relayer/

event_watcher: ## Builds the event_watcher bin
@@ -101,7 +101,7 @@ var L2MessageQueueMetaData = &bind.MetaData{

// L1GasPriceOracleMetaData contains all meta data concerning the L1GasPriceOracle contract.
var L1GasPriceOracleMetaData = &bind.MetaData{
	ABI: "[{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"scalar\",\"type\":\"uint256\"}],\"name\":\"BlobScalarUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"scalar\",\"type\":\"uint256\"}],\"name\":\"CommitScalarUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"l1BaseFee\",\"type\":\"uint256\"}],\"name\":\"L1BaseFeeUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"l1BlobBaseFee\",\"type\":\"uint256\"}],\"name\":\"L1BlobBaseFeeUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"overhead\",\"type\":\"uint256\"}],\"name\":\"OverheadUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"scalar\",\"type\":\"uint256\"}],\"name\":\"ScalarUpdated\",\"type\":\"event\"},{\"inputs\":[],\"name\":\"blobScalar\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"commitScalar\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"getL1Fee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"data\",\"type\":\"bytes\"}],\"name\":\"getL1GasUsed\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"l1BaseFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"l1BlobBaseFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"overhead\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"scalar\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_l1BaseFee\",\"type\":\"uint256\"}],\"name\":\"setL1BaseFee\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_l1BaseFee\",\"type\":\"uint256\"},{\"internalType\":\"uint256\",\"name\":\"_l1BlobBaseFee\",\"type\":\"uint256\"}],\"name\":\"setL1BaseFeeAndBlobBaseFee\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"}]",
	ABI: "[{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_owner\",\"type\":\"address\"}],\"stateMutability\":\"nonpayable\",\"type\":\"constructor\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"l1BaseFee\",\"type\":\"uint256\"}],\"name\":\"L1BaseFeeUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"overhead\",\"type\":\"uint256\"}],\"name\":\"OverheadUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":true,\"internalType\":\"address\",\"name\":\"_oldOwner\",\"type\":\"address\"},{\"indexed\":true,\"internalType\":\"address\",\"name\":\"_newOwner\",\"type\":\"address\"}],\"name\":\"OwnershipTransferred\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"uint256\",\"name\":\"scalar\",\"type\":\"uint256\"}],\"name\":\"ScalarUpdated\",\"type\":\"event\"},{\"anonymous\":false,\"inputs\":[{\"indexed\":false,\"internalType\":\"address\",\"name\":\"_oldWhitelist\",\"type\":\"address\"},{\"indexed\":false,\"internalType\":\"address\",\"name\":\"_newWhitelist\",\"type\":\"address\"}],\"name\":\"UpdateWhitelist\",\"type\":\"event\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"getL1Fee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"bytes\",\"name\":\"_data\",\"type\":\"bytes\"}],\"name\":\"getL1GasUsed\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"l1BaseFee\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"overhead\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"owner\",\"outputs\":[{\"internalType\":\"address\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"renounceOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"scalar\",\"outputs\":[{\"internalType\":\"uint256\",\"name\":\"\",\"type\":\"uint256\"}],\"stateMutability\":\"view\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_l1BaseFee\",\"type\":\"uint256\"}],\"name\":\"setL1BaseFee\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_overhead\",\"type\":\"uint256\"}],\"name\":\"setOverhead\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"uint256\",\"name\":\"_scalar\",\"type\":\"uint256\"}],\"name\":\"setScalar\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_newOwner\",\"type\":\"address\"}],\"name\":\"transferOwnership\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[{\"internalType\":\"address\",\"name\":\"_newWhitelist\",\"type\":\"address\"}],\"name\":\"updateWhitelist\",\"outputs\":[],\"stateMutability\":\"nonpayable\",\"type\":\"function\"},{\"inputs\":[],\"name\":\"whitelist\",\"outputs\":[{\"internalType\":\"contract IWhitelist\",\"name\":\"\",\"type\":\"address\"}],\"stateMutability\":\"view\",\"type\":\"function\"}]\n",
}

// IL1ScrollMessengerL2MessageProof is an auto generated low-level Go binding around an user-defined struct.
@@ -70,15 +70,13 @@
      "max_l1_commit_calldata_size_per_chunk": 112345,
      "chunk_timeout_sec": 300,
      "max_row_consumption_per_chunk": 1048319,
      "gas_cost_increase_multiplier": 1.2,
      "max_uncompressed_batch_bytes_size": 634880
      "gas_cost_increase_multiplier": 1.2
    },
    "batch_proposer_config": {
      "max_l1_commit_gas_per_batch": 11234567,
      "max_l1_commit_calldata_size_per_batch": 112345,
      "batch_timeout_sec": 300,
      "gas_cost_increase_multiplier": 1.2,
      "max_uncompressed_batch_bytes_size": 634880
      "gas_cost_increase_multiplier": 1.2
    }
  },
  "db_config": {
@@ -10,7 +10,7 @@ require (
	github.com/go-resty/resty/v2 v2.7.0
	github.com/holiman/uint256 v1.2.4
	github.com/prometheus/client_golang v1.16.0
	github.com/scroll-tech/da-codec v0.0.0-20240605080813-32bfc9fccde7
	github.com/scroll-tech/da-codec v0.0.0-20240524071411-769db9f7e703
	github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea
	github.com/smartystreets/goconvey v1.8.0
	github.com/stretchr/testify v1.9.0
@@ -236,8 +236,8 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/da-codec v0.0.0-20240605080813-32bfc9fccde7 h1:CDrPMqifvAVyYqu0x1J5qickVV0b51tApPnOwDYLESI=
github.com/scroll-tech/da-codec v0.0.0-20240605080813-32bfc9fccde7/go.mod h1:1wWYii0OPwd5kw+xrz0PFgS420xNadrNF1x/ELJT+TM=
github.com/scroll-tech/da-codec v0.0.0-20240524071411-769db9f7e703 h1:dcvPPyyfe3SocOBwgww3e1wcWjgF85kKDsohY4TXII0=
github.com/scroll-tech/da-codec v0.0.0-20240524071411-769db9f7e703/go.mod h1:1wWYii0OPwd5kw+xrz0PFgS420xNadrNF1x/ELJT+TM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea h1:CH1WXWrpEpLaP3N+bFs2a1xdE0+lRm1AuJQb5YvE6Ls=
github.com/scroll-tech/go-ethereum v1.10.14-0.20240426041101-a860446ebaea/go.mod h1:i4VBgWoaW/y0D8MmQb7hSOulyw1dKhuiSFAbznwivCA=
github.com/scroll-tech/zktrie v0.8.2 h1:UMuIfA+jdgWMLmTgTL64Emo+zzMOdcnH0+eYdDcshxQ=
@@ -33,7 +33,6 @@ type ChunkProposerConfig struct {
	ChunkTimeoutSec uint64 `json:"chunk_timeout_sec"`
	MaxRowConsumptionPerChunk uint64 `json:"max_row_consumption_per_chunk"`
	GasCostIncreaseMultiplier float64 `json:"gas_cost_increase_multiplier"`
	MaxUncompressedBatchBytesSize uint64 `json:"max_uncompressed_batch_bytes_size"`
}

// BatchProposerConfig loads batch_proposer configuration items.
@@ -42,5 +41,4 @@ type BatchProposerConfig struct {
	MaxL1CommitCalldataSizePerBatch uint64 `json:"max_l1_commit_calldata_size_per_batch"`
	BatchTimeoutSec uint64 `json:"batch_timeout_sec"`
	GasCostIncreaseMultiplier float64 `json:"gas_cost_increase_multiplier"`
	MaxUncompressedBatchBytesSize uint64 `json:"max_uncompressed_batch_bytes_size"`
}
@@ -36,8 +36,7 @@ type Layer1Relayer struct {
	gasOracleSender *sender.Sender
	l1GasOracleABI  *abi.ABI

	lastBaseFee     uint64
	lastBlobBaseFee uint64
	lastGasPrice uint64
	minGasPrice  uint64
	gasPriceDiff uint64
	l1BaseFeeWeight float64
@@ -131,45 +130,37 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
	block := blocks[0]

	if types.GasOracleStatus(block.GasOracleStatus) == types.GasOraclePending {
		expectedDelta := r.lastGasPrice * r.gasPriceDiff / gasPriceDiffPrecision
		if r.lastGasPrice > 0 && expectedDelta == 0 {
			expectedDelta = 1
		}

		latestL2Height, err := r.l2BlockOrm.GetL2BlocksLatestHeight(r.ctx)
		if err != nil {
			log.Warn("Failed to fetch latest L2 block height from db", "err", err)
			return
		}

		var isBernoulli = block.BlobBaseFee > 0 && r.chainCfg.IsBernoulli(new(big.Int).SetUint64(latestL2Height))
		var isCurie = block.BlobBaseFee > 0 && r.chainCfg.IsCurie(new(big.Int).SetUint64(latestL2Height))
		var isBernoulli = r.chainCfg.IsBernoulli(new(big.Int).SetUint64(latestL2Height))

		var baseFee uint64
		var blobBaseFee uint64
		if isCurie {
			baseFee = block.BaseFee
			blobBaseFee = block.BlobBaseFee
		} else if isBernoulli {
		if isBernoulli && block.BlobBaseFee != 0 {
			baseFee = uint64(math.Ceil(r.l1BaseFeeWeight*float64(block.BaseFee) + r.l1BlobBaseFeeWeight*float64(block.BlobBaseFee)))
		} else {
			baseFee = block.BaseFee
		}

		if r.shouldUpdateGasOracle(baseFee, blobBaseFee, isCurie) {
			var data []byte
			if isCurie {
				data, err = r.l1GasOracleABI.Pack("setL1BaseFeeAndBlobBaseFee", new(big.Int).SetUint64(baseFee), new(big.Int).SetUint64(blobBaseFee))
				if err != nil {
					log.Error("Failed to pack setL1BaseFeeAndBlobBaseFee", "block.Hash", block.Hash, "block.Height", block.Number, "block.BaseFee", baseFee, "block.BlobBaseFee", blobBaseFee, "isBernoulli", isBernoulli, "isCurie", isCurie, "err", err)
					return
				}
			} else {
				data, err = r.l1GasOracleABI.Pack("setL1BaseFee", new(big.Int).SetUint64(baseFee))
				if err != nil {
					log.Error("Failed to pack setL1BaseFee", "block.Hash", block.Hash, "block.Height", block.Number, "block.BaseFee", baseFee, "block.BlobBaseFee", blobBaseFee, "isBernoulli", isBernoulli, "isCurie", isCurie, "err", err)
					return
				}
		// last is undefined or (baseFee >= minGasPrice && exceed diff)
		if r.lastGasPrice == 0 || (baseFee >= r.minGasPrice && (baseFee >= r.lastGasPrice+expectedDelta || baseFee <= r.lastGasPrice-expectedDelta)) {
			data, err := r.l1GasOracleABI.Pack("setL1BaseFee", new(big.Int).SetUint64(baseFee))
			if err != nil {
				log.Error("Failed to pack setL1BaseFee", "block.Hash", block.Hash, "block.Height", block.Number, "block.BaseFee", baseFee, "isBernoulli", isBernoulli, "err", err)
				return
			}

			hash, err := r.gasOracleSender.SendTransaction(block.Hash, &r.cfg.GasPriceOracleContractAddress, data, nil, 0)
			if err != nil {
				log.Error("Failed to send gas oracle update tx to layer2", "block.Hash", block.Hash, "block.Height", block.Number, "block.BaseFee", baseFee, "block.BlobBaseFee", blobBaseFee, "isBernoulli", isBernoulli, "isCurie", isCurie, "err", err)
				log.Error("Failed to send setL1BaseFee tx to layer2 ", "block.Hash", block.Hash, "block.Height", block.Number, "err", err)
				return
			}

@@ -178,12 +169,9 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
				log.Error("UpdateGasOracleStatusAndOracleTxHash failed", "block.Hash", block.Hash, "block.Height", block.Number, "err", err)
				return
			}

			r.lastBaseFee = baseFee
			r.lastBlobBaseFee = blobBaseFee
			r.metrics.rollupL1RelayerLatestBaseFee.Set(float64(r.lastBaseFee))
			r.metrics.rollupL1RelayerLatestBlobBaseFee.Set(float64(r.lastBlobBaseFee))
			log.Info("Update l1 base fee", "txHash", hash.String(), "baseFee", baseFee, "blobBaseFee", blobBaseFee, "isBernoulli", isBernoulli, "isCurie", isCurie)
			r.lastGasPrice = baseFee
			r.metrics.rollupL1RelayerLastGasPrice.Set(float64(r.lastGasPrice))
			log.Info("Update l1 base fee", "txHash", hash.String(), "baseFee", baseFee, "isBernoulli", isBernoulli)
		}
	}
}
@@ -231,34 +219,3 @@ func (r *Layer1Relayer) StopSenders() {
		r.gasOracleSender.Stop()
	}
}

func (r *Layer1Relayer) shouldUpdateGasOracle(baseFee uint64, blobBaseFee uint64, isCurie bool) bool {
	// Right after restarting.
	if r.lastBaseFee == 0 {
		return true
	}

	expectedBaseFeeDelta := r.lastBaseFee*r.gasPriceDiff/gasPriceDiffPrecision + 1
	if baseFee >= r.minGasPrice && (baseFee >= r.lastBaseFee+expectedBaseFeeDelta || baseFee+expectedBaseFeeDelta <= r.lastBaseFee) {
		return true
	}

	// Omitting blob base fee checks before Curie.
	if !isCurie {
		return false
	}

	// Right after enabling Curie.
	if r.lastBlobBaseFee == 0 {
		return true
	}

	expectedBlobBaseFeeDelta := r.lastBlobBaseFee * r.gasPriceDiff / gasPriceDiffPrecision
	// Add a minimum of 0.01 gwei, since the blob base fee is usually low, to prevent short-term fluctuation.
	expectedBlobBaseFeeDelta += 10000000
	if blobBaseFee >= r.minGasPrice && (blobBaseFee >= r.lastBlobBaseFee+expectedBlobBaseFeeDelta || blobBaseFee+expectedBlobBaseFeeDelta <= r.lastBlobBaseFee) {
		return true
	}

	return false
}
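Review note: the update rule in shouldUpdateGasOracle reduces to a symmetric deadband around the last sent value, using the overflow-safe `new + delta <= last` form for the downward check. A Rust sketch of the same arithmetic (names and constants hypothetical, mirroring the Go above):

```rust
// Deadband check: update when the new base fee moves at least `delta` away
// from the last sent value (and clears the minimum price floor).
fn should_update(last: u64, new: u64, min: u64, diff: u64, precision: u64) -> bool {
    if last == 0 {
        return true; // right after restart
    }
    let delta = last * diff / precision + 1;
    new >= min && (new >= last + delta || new + delta <= last)
}
assert!(should_update(0, 100, 0, 50_000, 1_000_000));   // first update always fires
assert!(!should_update(100, 102, 0, 50_000, 1_000_000)); // delta = 6, still in band
assert!(should_update(100, 110, 0, 50_000, 1_000_000));  // moved by >= 6
```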
@@ -9,8 +9,7 @@ import (

type l1RelayerMetrics struct {
	rollupL1RelayerGasPriceOraclerRunTotal prometheus.Counter
	rollupL1RelayerLatestBaseFee prometheus.Gauge
	rollupL1RelayerLatestBlobBaseFee prometheus.Gauge
	rollupL1RelayerLastGasPrice prometheus.Gauge
	rollupL1UpdateGasOracleConfirmedTotal prometheus.Counter
	rollupL1UpdateGasOracleConfirmedFailedTotal prometheus.Counter
}
@@ -27,13 +26,9 @@ func initL1RelayerMetrics(reg prometheus.Registerer) *l1RelayerMetrics {
			Name: "rollup_layer1_gas_price_oracler_total",
			Help: "The total number of layer1 gas price oracler run total",
		}),
		rollupL1RelayerLatestBaseFee: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
			Name: "rollup_layer1_latest_base_fee",
			Help: "The latest base fee of l1 rollup relayer",
		}),
		rollupL1RelayerLatestBlobBaseFee: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
			Name: "rollup_layer1_latest_blob_base_fee",
			Help: "The latest blob base fee of l1 rollup relayer",
		rollupL1RelayerLastGasPrice: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
			Name: "rollup_layer1_gas_price_latest_gas_price",
			Help: "The latest gas price of rollup relayer l1",
		}),
		rollupL1UpdateGasOracleConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
			Name: "rollup_layer1_update_gas_oracle_confirmed_total",
@@ -32,7 +32,7 @@ func setupL1RelayerDB(t *testing.T) *gorm.DB {
	return db
}

// testCreateNewL1Relayer test create new relayer instance and stop
// testCreateNewRelayer test create new relayer instance and stop
func testCreateNewL1Relayer(t *testing.T) {
	db := setupL1RelayerDB(t)
	defer database.CloseDB(db)
@@ -311,7 +311,7 @@ func (r *Layer2Relayer) ProcessGasPriceOracle() {
	}

	// last is undefined or (suggestGasPriceUint64 >= minGasPrice && exceed diff)
	if r.lastGasPrice == 0 || (suggestGasPriceUint64 >= r.minGasPrice && (suggestGasPriceUint64 >= r.lastGasPrice+expectedDelta || suggestGasPriceUint64+expectedDelta <= r.lastGasPrice)) {
	if r.lastGasPrice == 0 || (suggestGasPriceUint64 >= r.minGasPrice && (suggestGasPriceUint64 >= r.lastGasPrice+expectedDelta || suggestGasPriceUint64 <= r.lastGasPrice-expectedDelta)) {
		data, err := r.l2GasOracleABI.Pack("setL2BaseFee", suggestGasPrice)
		if err != nil {
			log.Error("Failed to pack setL2BaseFee", "batch.Hash", batch.Hash, "GasPrice", suggestGasPrice.Uint64(), "err", err)
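Review note: the comparison change in this hunk is not purely cosmetic under unsigned arithmetic: `x + delta <= last` never wraps, while `last - delta` wraps around when `delta > last` (Go's uint64 wraps silently). In context the guard `r.lastGasPrice == 0 ||` and the way `expectedDelta` is derived from `lastGasPrice` keep `delta <= last`, so both forms agree here, but the hazard is worth pinning down:

```rust
// With unsigned arithmetic, the two forms diverge once delta > last.
let (last, delta, suggest) = (5u64, 8u64, 4u64);
let safe = suggest + delta <= last;                 // false: 12 <= 5
let wrapping = suggest <= last.wrapping_sub(delta); // true: 4 <= 2^64 - 3
assert_ne!(safe, wrapping);
```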
@@ -319,10 +319,10 @@ func testAccessListTransactionGasLimit(t *testing.T) {
	assert.NoError(t, err)

	if txType == LegacyTxType { // Legacy transactions can not have an access list.
		assert.Equal(t, uint64(43935), gasLimit)
		assert.Equal(t, uint64(43956), gasLimit)
		assert.Nil(t, accessList)
	} else { // Dynamic fee and blob transactions can have an access list.
		assert.Equal(t, uint64(43458), gasLimit)
		assert.Equal(t, uint64(43479), gasLimit)
		assert.NotNil(t, accessList)
	}
@@ -34,7 +34,6 @@ type BatchProposer struct {
    maxL1CommitCalldataSizePerBatch uint64
    batchTimeoutSec                 uint64
    gasCostIncreaseMultiplier       float64
    maxUncompressedBatchBytesSize   uint64
    forkMap                         map[uint64]bool

    chainCfg *params.ChainConfig
@@ -62,7 +61,6 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, chai
    "maxL1CommitCalldataSizePerBatch", cfg.MaxL1CommitCalldataSizePerBatch,
    "batchTimeoutSec", cfg.BatchTimeoutSec,
    "gasCostIncreaseMultiplier", cfg.GasCostIncreaseMultiplier,
    "maxUncompressedBatchBytesSize", cfg.MaxUncompressedBatchBytesSize,
    "forkHeights", forkHeights)

    p := &BatchProposer{
@@ -75,7 +73,6 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, chai
    maxL1CommitCalldataSizePerBatch: cfg.MaxL1CommitCalldataSizePerBatch,
    batchTimeoutSec:                 cfg.BatchTimeoutSec,
    gasCostIncreaseMultiplier:       cfg.GasCostIncreaseMultiplier,
    maxUncompressedBatchBytesSize:   cfg.MaxUncompressedBatchBytesSize,
    forkMap:                         forkMap,
    chainCfg:                        chainCfg,

@@ -238,12 +235,13 @@ func (p *BatchProposer) proposeBatch() error {
    p.recordTimerBatchMetrics(metrics)

    totalOverEstimateL1CommitGas := uint64(p.gasCostIncreaseMultiplier * float64(metrics.L1CommitGas))
    if metrics.L1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch || totalOverEstimateL1CommitGas > p.maxL1CommitGasPerBatch ||
        metrics.L1CommitBlobSize > maxBlobSize || metrics.L1CommitUncompressedBatchBytesSize > p.maxUncompressedBatchBytesSize {
    if metrics.L1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch ||
        totalOverEstimateL1CommitGas > p.maxL1CommitGasPerBatch ||
        metrics.L1CommitBlobSize > maxBlobSize {
        if i == 0 {
            // The first chunk exceeds hard limits, which indicates a bug in the chunk-proposer, manual fix is needed.
            return fmt.Errorf("the first chunk exceeds limits; start block number: %v, end block number: %v, limits: %+v, maxChunkNum: %v, maxL1CommitCalldataSize: %v, maxL1CommitGas: %v, maxBlobSize: %v, maxUncompressedBatchBytesSize: %v",
                dbChunks[0].StartBlockNumber, dbChunks[0].EndBlockNumber, metrics, maxChunksThisBatch, p.maxL1CommitCalldataSizePerBatch, p.maxL1CommitGasPerBatch, maxBlobSize, p.maxUncompressedBatchBytesSize)
            return fmt.Errorf("the first chunk exceeds limits; start block number: %v, end block number: %v, limits: %+v, maxChunkNum: %v, maxL1CommitCalldataSize: %v, maxL1CommitGas: %v, maxBlobSize: %v",
                dbChunks[0].StartBlockNumber, dbChunks[0].EndBlockNumber, metrics, maxChunksThisBatch, p.maxL1CommitCalldataSizePerBatch, p.maxL1CommitGasPerBatch, maxBlobSize)
        }

        log.Debug("breaking limit condition in batching",
@@ -253,9 +251,7 @@ func (p *BatchProposer) proposeBatch() error {
            "overEstimateL1CommitGas", totalOverEstimateL1CommitGas,
            "maxL1CommitGas", p.maxL1CommitGasPerBatch,
            "l1CommitBlobSize", metrics.L1CommitBlobSize,
            "maxBlobSize", maxBlobSize,
            "L1CommitUncompressedBatchBytesSize", metrics.L1CommitUncompressedBatchBytesSize,
            "maxUncompressedBatchBytesSize", p.maxUncompressedBatchBytesSize)
            "maxBlobSize", maxBlobSize)

        batch.Chunks = batch.Chunks[:len(batch.Chunks)-1]

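For a concrete sense of the over-estimation compared against the cap above, with hypothetical numbers (the 1.2 multiplier matches the GasCostIncreaseMultiplier used in the tests below):

    maxL1CommitGasPerBatch := uint64(110000)                // hypothetical cap
    multiplier := 1.2                                       // cfg.GasCostIncreaseMultiplier
    estimated := uint64(100000)                             // metrics.L1CommitGas
    overEstimate := uint64(multiplier * float64(estimated)) // 120000
    exceeds := overEstimate > maxL1CommitGasPerBatch        // true: the last chunk is dropped from the batch
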
@@ -287,9 +287,9 @@ func testBatchProposerCodecv1Limits(t *testing.T) {

    chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
    assert.NoError(t, err)
    assert.Equal(t, uint64(1124), chunks[0].TotalL1CommitGas)
    assert.Equal(t, uint64(2084), chunks[0].TotalL1CommitGas)
    assert.Equal(t, uint64(60), chunks[0].TotalL1CommitCalldataSize)
    assert.Equal(t, uint64(1124), chunks[1].TotalL1CommitGas)
    assert.Equal(t, uint64(2084), chunks[1].TotalL1CommitGas)
    assert.Equal(t, uint64(60), chunks[1].TotalL1CommitCalldataSize)

    bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{

@@ -431,7 +431,6 @@ func testBatchProposerCodecv2Limits(t *testing.T) {
    MaxRowConsumptionPerChunk:     1000000,
    ChunkTimeoutSec:               300,
    GasCostIncreaseMultiplier:     1.2,
    MaxUncompressedBatchBytesSize: math.MaxUint64,
}, &params.ChainConfig{
    BernoulliBlock: big.NewInt(0),
    CurieBlock:     big.NewInt(0),

@@ -442,9 +441,9 @@ func testBatchProposerCodecv2Limits(t *testing.T) {

    chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
    assert.NoError(t, err)
    assert.Equal(t, uint64(1124), chunks[0].TotalL1CommitGas)
    assert.Equal(t, uint64(2084), chunks[0].TotalL1CommitGas)
    assert.Equal(t, uint64(60), chunks[0].TotalL1CommitCalldataSize)
    assert.Equal(t, uint64(1124), chunks[1].TotalL1CommitGas)
    assert.Equal(t, uint64(2084), chunks[1].TotalL1CommitGas)
    assert.Equal(t, uint64(60), chunks[1].TotalL1CommitCalldataSize)

    bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{

@@ -452,7 +451,6 @@ func testBatchProposerCodecv2Limits(t *testing.T) {
    MaxL1CommitCalldataSizePerBatch: tt.maxL1CommitCalldataSize,
    BatchTimeoutSec:                 tt.batchTimeoutSec,
    GasCostIncreaseMultiplier:       1.2,
    MaxUncompressedBatchBytesSize:   math.MaxUint64,
}, &params.ChainConfig{
    BernoulliBlock: big.NewInt(0),
    CurieBlock:     big.NewInt(0),

@@ -606,9 +604,9 @@ func testBatchCommitGasAndCalldataSizeCodecv1Estimation(t *testing.T) {

    chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
    assert.NoError(t, err)
    assert.Equal(t, uint64(1124), chunks[0].TotalL1CommitGas)
    assert.Equal(t, uint64(2084), chunks[0].TotalL1CommitGas)
    assert.Equal(t, uint64(60), chunks[0].TotalL1CommitCalldataSize)
    assert.Equal(t, uint64(1124), chunks[1].TotalL1CommitGas)
    assert.Equal(t, uint64(2084), chunks[1].TotalL1CommitGas)
    assert.Equal(t, uint64(60), chunks[1].TotalL1CommitCalldataSize)

    bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{

@@ -636,7 +634,7 @@ func testBatchCommitGasAndCalldataSizeCodecv1Estimation(t *testing.T) {
        assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(chunk.ProvingStatus))
    }

    assert.Equal(t, uint64(159350), batches[0].TotalL1CommitGas)
    assert.Equal(t, uint64(161270), batches[0].TotalL1CommitGas)
    assert.Equal(t, uint64(120), batches[0].TotalL1CommitCalldataSize)
}

@@ -679,16 +677,15 @@ func testBatchCommitGasAndCalldataSizeCodecv2Estimation(t *testing.T) {
    MaxRowConsumptionPerChunk:     1000000,
    ChunkTimeoutSec:               300,
    GasCostIncreaseMultiplier:     1.2,
    MaxUncompressedBatchBytesSize: math.MaxUint64,
}, &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)}, db, nil)
cp.TryProposeChunk() // chunk1 contains block1
cp.TryProposeChunk() // chunk2 contains block2

chunks, err := chunkOrm.GetChunksInRange(context.Background(), 1, 2)
assert.NoError(t, err)
assert.Equal(t, uint64(1124), chunks[0].TotalL1CommitGas)
assert.Equal(t, uint64(2084), chunks[0].TotalL1CommitGas)
assert.Equal(t, uint64(60), chunks[0].TotalL1CommitCalldataSize)
assert.Equal(t, uint64(1124), chunks[1].TotalL1CommitGas)
assert.Equal(t, uint64(2084), chunks[1].TotalL1CommitGas)
assert.Equal(t, uint64(60), chunks[1].TotalL1CommitCalldataSize)

bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{

@@ -696,7 +693,6 @@ func testBatchCommitGasAndCalldataSizeCodecv2Estimation(t *testing.T) {
    MaxL1CommitCalldataSizePerBatch: 1000000,
    BatchTimeoutSec:                 0,
    GasCostIncreaseMultiplier:       1.2,
    MaxUncompressedBatchBytesSize:   math.MaxUint64,
}, &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)}, db, nil)
bp.TryProposeBatch()

@@ -717,7 +713,7 @@ func testBatchCommitGasAndCalldataSizeCodecv2Estimation(t *testing.T) {
        assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(chunk.ProvingStatus))
    }

    assert.Equal(t, uint64(159350), batches[0].TotalL1CommitGas)
    assert.Equal(t, uint64(161270), batches[0].TotalL1CommitGas)
    assert.Equal(t, uint64(120), batches[0].TotalL1CommitCalldataSize)
}

@@ -764,7 +760,6 @@ func testBatchProposerBlobSizeLimit(t *testing.T) {
    MaxRowConsumptionPerChunk:     math.MaxUint64,
    ChunkTimeoutSec:               0,
    GasCostIncreaseMultiplier:     1,
    MaxUncompressedBatchBytesSize: math.MaxUint64,
}, chainConfig, db, nil)

blockHeight := int64(0)

@@ -785,7 +780,6 @@ func testBatchProposerBlobSizeLimit(t *testing.T) {
    MaxL1CommitCalldataSizePerBatch: math.MaxUint64,
    BatchTimeoutSec:                 math.MaxUint64,
    GasCostIncreaseMultiplier:       1,
    MaxUncompressedBatchBytesSize:   math.MaxUint64,
}, chainConfig, db, nil)

for i := 0; i < 30; i++ {

@@ -857,7 +851,6 @@ func testBatchProposerMaxChunkNumPerBatchLimit(t *testing.T) {
    MaxRowConsumptionPerChunk:     math.MaxUint64,
    ChunkTimeoutSec:               0,
    GasCostIncreaseMultiplier:     1,
    MaxUncompressedBatchBytesSize: math.MaxUint64,
}, chainConfig, db, nil)

block = readBlockFromJSON(t, "../../../testdata/blockTrace_03.json")

@@ -873,7 +866,6 @@ func testBatchProposerMaxChunkNumPerBatchLimit(t *testing.T) {
    MaxL1CommitCalldataSizePerBatch: math.MaxUint64,
    BatchTimeoutSec:                 math.MaxUint64,
    GasCostIncreaseMultiplier:       1,
    MaxUncompressedBatchBytesSize:   math.MaxUint64,
}, chainConfig, db, nil)
bp.TryProposeBatch()

@@ -19,6 +19,8 @@ import (
    "scroll-tech/rollup/internal/utils"
)

const maxBlobSize = uint64(131072)

// ChunkProposer proposes chunks based on available unchunked blocks.
type ChunkProposer struct {
    ctx context.Context
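For context on the constant above (background, not part of the diff): 131072 bytes is the EIP-4844 blob capacity, 4096 field elements of 32 bytes each; codecs that pack arbitrary bytes into a blob typically use only 31 usable bytes per element to stay below the BLS modulus.

    const (
        blobFieldElements    = 4096
        bytesPerFieldElement = 32
        // blobFieldElements * bytesPerFieldElement == 131072 == maxBlobSize
    )
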
@@ -34,7 +36,6 @@ type ChunkProposer struct {
    maxRowConsumptionPerChunk     uint64
    chunkTimeoutSec               uint64
    gasCostIncreaseMultiplier     float64
    maxUncompressedBatchBytesSize uint64
    forkHeights                   []uint64

    chainCfg *params.ChainConfig
@@ -66,7 +67,6 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, chai
    "maxRowConsumptionPerChunk", cfg.MaxRowConsumptionPerChunk,
    "chunkTimeoutSec", cfg.ChunkTimeoutSec,
    "gasCostIncreaseMultiplier", cfg.GasCostIncreaseMultiplier,
    "maxUncompressedBatchBytesSize", cfg.MaxUncompressedBatchBytesSize,
    "forkHeights", forkHeights)

    p := &ChunkProposer{
@@ -81,7 +81,6 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, chai
    maxRowConsumptionPerChunk:     cfg.MaxRowConsumptionPerChunk,
    chunkTimeoutSec:               cfg.ChunkTimeoutSec,
    gasCostIncreaseMultiplier:     cfg.GasCostIncreaseMultiplier,
    maxUncompressedBatchBytesSize: cfg.MaxUncompressedBatchBytesSize,
    forkHeights:                   forkHeights,
    chainCfg:                      chainCfg,

@@ -187,7 +186,6 @@ func (p *ChunkProposer) updateDBChunkInfo(chunk *encoding.Chunk, codecVersion en
}

func (p *ChunkProposer) proposeChunk() error {
    // unchunkedBlockHeight >= 1, assuming genesis batch with chunk 0, block 0 is committed.
    unchunkedBlockHeight, err := p.chunkOrm.GetUnchunkedBlockHeight(p.ctx)
    if err != nil {
        return err
@@ -218,17 +216,6 @@ func (p *ChunkProposer) proposeChunk() error {
        codecVersion = encoding.CodecV2
    }

    // Include the Curie block in a sole chunk.
    if p.chainCfg.CurieBlock != nil && blocks[0].Header.Number.Cmp(p.chainCfg.CurieBlock) == 0 {
        chunk := encoding.Chunk{Blocks: blocks[:1]}
        metrics, calcErr := utils.CalculateChunkMetrics(&chunk, codecVersion)
        if calcErr != nil {
            return fmt.Errorf("failed to calculate chunk metrics: %w", calcErr)
        }
        p.recordTimerChunkMetrics(metrics)
        return p.updateDBChunkInfo(&chunk, codecVersion, *metrics)
    }

    var chunk encoding.Chunk
    for i, block := range blocks {
        chunk.Blocks = append(chunk.Blocks, block)
@@ -245,12 +232,11 @@ func (p *ChunkProposer) proposeChunk() error {
            metrics.L1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk ||
            overEstimatedL1CommitGas > p.maxL1CommitGasPerChunk ||
            metrics.CrcMax > p.maxRowConsumptionPerChunk ||
            metrics.L1CommitBlobSize > maxBlobSize ||
            metrics.L1CommitUncompressedBatchBytesSize > p.maxUncompressedBatchBytesSize {
            metrics.L1CommitBlobSize > maxBlobSize {
            if i == 0 {
                // The first block exceeds hard limits, which indicates a bug in the sequencer, manual fix is needed.
                return fmt.Errorf("the first block exceeds limits; block number: %v, limits: %+v, maxTxNum: %v, maxL1CommitCalldataSize: %v, maxL1CommitGas: %v, maxRowConsumption: %v, maxBlobSize: %v, maxUncompressedBatchBytesSize: %v",
                    block.Header.Number, metrics, p.maxTxNumPerChunk, p.maxL1CommitCalldataSizePerChunk, p.maxL1CommitGasPerChunk, p.maxRowConsumptionPerChunk, maxBlobSize, p.maxUncompressedBatchBytesSize)
                return fmt.Errorf("the first block exceeds limits; block number: %v, limits: %+v, maxTxNum: %v, maxL1CommitCalldataSize: %v, maxL1CommitGas: %v, maxRowConsumption: %v, maxBlobSize: %v",
                    block.Header.Number, metrics, p.maxTxNumPerChunk, p.maxL1CommitCalldataSizePerChunk, p.maxL1CommitGasPerChunk, p.maxRowConsumptionPerChunk, maxBlobSize)
            }

            log.Debug("breaking limit condition in chunking",
@@ -264,9 +250,7 @@ func (p *ChunkProposer) proposeChunk() error {
                "rowConsumption", metrics.CrcMax,
                "maxRowConsumption", p.maxRowConsumptionPerChunk,
                "l1CommitBlobSize", metrics.L1CommitBlobSize,
                "maxBlobSize", maxBlobSize,
                "L1CommitUncompressedBatchBytesSize", metrics.L1CommitUncompressedBatchBytesSize,
                "maxUncompressedBatchBytesSize", p.maxUncompressedBatchBytesSize)
                "maxBlobSize", maxBlobSize)

            chunk.Blocks = chunk.Blocks[:len(chunk.Blocks)-1]

@@ -544,7 +544,6 @@ func testChunkProposerCodecv2Limits(t *testing.T) {
    MaxRowConsumptionPerChunk:     tt.maxRowConsumption,
    ChunkTimeoutSec:               tt.chunkTimeoutSec,
    GasCostIncreaseMultiplier:     1.2,
    MaxUncompressedBatchBytesSize: math.MaxUint64,
}, &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), HomesteadBlock: tt.forkBlock}, db, nil)
cp.TryProposeChunk()

@@ -594,7 +593,6 @@ func testChunkProposerBlobSizeLimit(t *testing.T) {
    MaxRowConsumptionPerChunk:     math.MaxUint64,
    ChunkTimeoutSec:               math.MaxUint64,
    GasCostIncreaseMultiplier:     1,
    MaxUncompressedBatchBytesSize: math.MaxUint64,
}, chainConfig, db, nil)

for i := 0; i < 10; i++ {

@@ -626,39 +624,3 @@ func testChunkProposerBlobSizeLimit(t *testing.T) {
        database.CloseDB(db)
    }
}

func testChunkProposerIncludeCurieBlockInOneChunk(t *testing.T) {
    db := setupDB(t)
    block := readBlockFromJSON(t, "../../../testdata/blockTrace_02.json")
    for i := int64(0); i < 10; i++ {
        l2BlockOrm := orm.NewL2Block(db)
        block.Header.Number = big.NewInt(i)
        err := l2BlockOrm.InsertL2Blocks(context.Background(), []*encoding.Block{block})
        assert.NoError(t, err)
    }

    cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
        MaxBlockNumPerChunk:             math.MaxUint64,
        MaxTxNumPerChunk:                math.MaxUint64,
        MaxL1CommitGasPerChunk:          math.MaxUint64,
        MaxL1CommitCalldataSizePerChunk: math.MaxUint64,
        MaxRowConsumptionPerChunk:       math.MaxUint64,
        ChunkTimeoutSec:                 math.MaxUint64,
        GasCostIncreaseMultiplier:       1,
        MaxUncompressedBatchBytesSize:   math.MaxUint64,
    }, &params.ChainConfig{BernoulliBlock: big.NewInt(1), CurieBlock: big.NewInt(2)}, db, nil)

    for i := 0; i < 2; i++ {
        cp.TryProposeChunk()
    }

    chunkOrm := orm.NewChunk(db)
    chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
    assert.NoError(t, err)

    assert.Len(t, chunks, 2)
    for i, chunk := range chunks {
        assert.Equal(t, uint64(i+1), chunk.EndBlockNumber)
    }
    database.CloseDB(db)
}

@@ -1,5 +1,3 @@
package watcher

const contractEventsBlocksFetchLimit = int64(10)

const maxBlobSize = uint64(131072)

@@ -109,7 +109,6 @@ func TestFunction(t *testing.T) {
    t.Run("TestChunkProposerCodecv1Limits", testChunkProposerCodecv1Limits)
    t.Run("TestChunkProposerCodecv2Limits", testChunkProposerCodecv2Limits)
    t.Run("TestChunkProposerBlobSizeLimit", testChunkProposerBlobSizeLimit)
    t.Run("TestChunkProposerIncludeCurieBlockInOneChunk", testChunkProposerIncludeCurieBlockInOneChunk)

    // Run batch proposer test cases.
    t.Run("TestBatchProposerCodecv0Limits", testBatchProposerCodecv0Limits)

@@ -83,9 +83,6 @@ type ChunkMetrics struct {
    // codecv1 metrics, default 0 for codecv0
    L1CommitBlobSize uint64

    // codecv2 metrics, default 0 for codecv0 & codecv1
    L1CommitUncompressedBatchBytesSize uint64

    // timing metrics
    EstimateGasTime          time.Duration
    EstimateCalldataSizeTime time.Duration
@@ -146,7 +143,7 @@ func CalculateChunkMetrics(chunk *encoding.Chunk, codecVersion encoding.CodecVer
    metrics.EstimateCalldataSizeTime = time.Since(start)

    start = time.Now()
    metrics.L1CommitUncompressedBatchBytesSize, metrics.L1CommitBlobSize, err = codecv2.EstimateChunkL1CommitBatchSizeAndBlobSize(chunk)
    metrics.L1CommitBlobSize, err = codecv2.EstimateChunkL1CommitBlobSize(chunk)
    metrics.EstimateBlobSizeTime = time.Since(start)
    if err != nil {
        return nil, fmt.Errorf("failed to estimate codecv2 chunk L1 commit blob size: %w", err)
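The estimator change above narrows the codecv2 return values; the two call shapes, as they appear in this diff:

    // old: (uncompressed batch bytes, blob size, error)
    //   metrics.L1CommitUncompressedBatchBytesSize, metrics.L1CommitBlobSize, err = codecv2.EstimateChunkL1CommitBatchSizeAndBlobSize(chunk)
    // new: (blob size, error); the uncompressed-bytes metric is dropped along with it
    //   metrics.L1CommitBlobSize, err = codecv2.EstimateChunkL1CommitBlobSize(chunk)
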
@@ -169,9 +166,6 @@ type BatchMetrics struct {
    // codecv1 metrics, default 0 for codecv0
    L1CommitBlobSize uint64

    // codecv2 metrics, default 0 for codecv0 & codecv1
    L1CommitUncompressedBatchBytesSize uint64

    // timing metrics
    EstimateGasTime          time.Duration
    EstimateCalldataSizeTime time.Duration
@@ -226,7 +220,7 @@ func CalculateBatchMetrics(batch *encoding.Batch, codecVersion encoding.CodecVer
    metrics.EstimateCalldataSizeTime = time.Since(start)

    start = time.Now()
    metrics.L1CommitUncompressedBatchBytesSize, metrics.L1CommitBlobSize, err = codecv2.EstimateBatchL1CommitBatchSizeAndBlobSize(batch)
    metrics.L1CommitBlobSize, err = codecv2.EstimateBatchL1CommitBlobSize(batch)
    metrics.EstimateBlobSizeTime = time.Since(start)
    if err != nil {
        return nil, fmt.Errorf("failed to estimate codecv2 batch L1 commit blob size: %w", err)

@@ -82,25 +82,14 @@ contract MockBridge {
    /// point evaluation precompile
    uint256 constant BLS_MODULUS = 52435875175126190479447740508185965837690552500527637822603658699938581184513;

    uint256 public l1BaseFee;
    uint256 public l1BlobBaseFee;
    uint256 public l2BaseFee;
    uint256 public lastFinalizedBatchIndex;
    mapping(uint256 => bytes32) public committedBatches;
    mapping(uint256 => bytes32) public finalizedStateRoots;
    mapping(uint256 => bytes32) public withdrawRoots;

    function setL1BaseFee(uint256 _l1BaseFee) external {
        l1BaseFee = _l1BaseFee;
    }

    function setL1BaseFeeAndBlobBaseFee(uint256 _l1BaseFee, uint256 _l1BlobBaseFee) external {
        l1BaseFee = _l1BaseFee;
        l1BlobBaseFee = _l1BlobBaseFee;
    }

    function setL2BaseFee(uint256 _l2BaseFee) external {
        l2BaseFee = _l2BaseFee;
    function setL2BaseFee(uint256 _newL2BaseFee) external {
        l2BaseFee = _newL2BaseFee;
    }

    /*****************************

17 rollup/run_test.sh Executable file
@@ -0,0 +1,17 @@
#!/bin/bash

# Download .so files
export LIBSCROLL_ZSTD_VERSION=v0.0.0-rc0-ubuntu20.04
export SCROLL_LIB_PATH=/scroll/lib

sudo mkdir -p $SCROLL_LIB_PATH

sudo wget -O $SCROLL_LIB_PATH/libzktrie.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libzktrie.so
sudo wget -O $SCROLL_LIB_PATH/libscroll_zstd.so https://github.com/scroll-tech/da-codec/releases/download/$LIBSCROLL_ZSTD_VERSION/libscroll_zstd.so

# Set the environment variables
export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$SCROLL_LIB_PATH
export CGO_LDFLAGS="-L$SCROLL_LIB_PATH -Wl,-rpath,$SCROLL_LIB_PATH"

# Run module tests
go test -v -race -gcflags="-l" -ldflags="-s=false" -coverprofile=coverage.txt -covermode=atomic ./...
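A usage note (an assumption, not stated in the script): run_test.sh is meant to be run from the rollup module root, since go test ./... resolves packages relative to the current directory, and the -Wl,-rpath linker flag embeds $SCROLL_LIB_PATH into the test binaries so libscroll_zstd.so is found at run time without a system-wide install.
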
196 rollup/scripts/get_batch_compression_ratio/main.go Normal file
@@ -0,0 +1,196 @@
package main

/*
#cgo LDFLAGS: -lm -ldl -lscroll_zstd
#include <stdint.h>
char* compress_scroll_batch_bytes(uint8_t* src, uint64_t src_size, uint8_t* output_buf, uint64_t *output_buf_size);
*/
import "C"

import (
    "context"
    "encoding/binary"
    "fmt"
    "os"
    "strconv"
    "unsafe"

    "github.com/scroll-tech/da-codec/encoding"
    "github.com/scroll-tech/da-codec/encoding/codecv1"
    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/core/types"
    "github.com/scroll-tech/go-ethereum/log"

    "scroll-tech/common/database"
    "scroll-tech/rollup/internal/orm"
)

func main() {
    glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
    glogger.Verbosity(log.LvlInfo)
    log.Root().SetHandler(glogger)

    if len(os.Args) < 3 {
        log.Crit("Please provide start and end batch indices: ./script <start_index> <end_index>")
        return
    }

    startIndexStr := os.Args[1]
    endIndexStr := os.Args[2]

    startIndex, err := strconv.Atoi(startIndexStr)
    if err != nil || startIndex <= 0 {
        log.Crit("Invalid start batch index", "indexStr", startIndexStr, "err", err)
        return
    }

    endIndex, err := strconv.Atoi(endIndexStr)
    if err != nil || endIndex <= 0 {
        log.Crit("Invalid end batch index", "indexStr", endIndexStr, "err", err)
        return
    }

    if startIndex > endIndex {
        log.Crit("Start index must be less than or equal to end index")
        return
    }

    db, err := database.InitDB(&database.Config{
        DriverName: "postgres",
        DSN:        "postgres://postgres:scroll2022@localhost:7432/scroll",
        MaxOpenNum: 200,
        MaxIdleNum: 20,
    })
    if err != nil {
        log.Crit("failed to init db", "err", err)
    }
    defer func() {
        if deferErr := database.CloseDB(db); deferErr != nil {
            log.Error("failed to close db", "err", deferErr)
        }
    }()

    l2BlockOrm := orm.NewL2Block(db)
    chunkOrm := orm.NewChunk(db)
    batchOrm := orm.NewBatch(db)

    totalRawSize := uint64(0)
    totalCompressedSize := uint64(0)

    for i := startIndex; i <= endIndex; i++ {
        batchIndex := uint64(i)
        dbBatch, err := batchOrm.GetBatchByIndex(context.Background(), batchIndex)
        if err != nil {
            log.Crit("failed to get batch", "index", batchIndex, "err", err)
        }

        dbParentBatch, err := batchOrm.GetBatchByIndex(context.Background(), batchIndex-1)
        if err != nil {
            log.Crit("failed to get parent batch", "index", batchIndex-1, "err", err)
        }

        dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), dbBatch.StartChunkIndex, dbBatch.EndChunkIndex)
        if err != nil {
            log.Crit("failed to fetch chunks", "err", err)
        }

        chunks := make([]*encoding.Chunk, len(dbChunks))
        for i, c := range dbChunks {
            blocks, err := l2BlockOrm.GetL2BlocksInRange(context.Background(), c.StartBlockNumber, c.EndBlockNumber)
            if err != nil {
                log.Crit("failed to fetch blocks", "err", err)
            }
            chunks[i] = &encoding.Chunk{Blocks: blocks}
        }

        batch := &encoding.Batch{
            Index:                      dbBatch.Index,
            TotalL1MessagePoppedBefore: dbChunks[0].TotalL1MessagesPoppedBefore,
            ParentBatchHash:            common.HexToHash(dbParentBatch.Hash),
            Chunks:                     chunks,
        }

        raw, compressed, blobSize, err := estimateBatchL1CommitBlobSize(batch)
        if err != nil {
            log.Crit("failed to estimate batch l1 commit blob size", "err", err)
        }

        // compression_ratio = preimage_bytes / compressed_bytes
        log.Info("compression ratio", "batch index", batch.Index, "raw length", raw, "compressed length", compressed, "blobSize", blobSize, "ratio", float64(raw)/float64(compressed))

        totalRawSize += raw
        totalCompressedSize += compressed
    }

    batchCount := endIndex - startIndex + 1
    averageRawSize := float64(totalRawSize) / float64(batchCount)
    averageCompressedSize := float64(totalCompressedSize) / float64(batchCount)

    log.Info("Average compression ratio", "average raw length", averageRawSize, "average compressed length", averageCompressedSize, "ratio", averageRawSize/averageCompressedSize)
}

func estimateBatchL1CommitBlobSize(b *encoding.Batch) (uint64, uint64, uint64, error) {
    batchBytes, err := constructBatchPayload(b.Chunks)
    if err != nil {
        return 0, 0, 0, err
    }
    blobBytes, err := compressScrollBatchBytes(batchBytes)
    if err != nil {
        return 0, 0, 0, err
    }
    return uint64(len(batchBytes)), uint64(len(blobBytes)), codecv1.CalculatePaddedBlobSize(uint64(len(blobBytes))), nil
}

// constructBatchPayload constructs the batch payload.
// This function is only used in compressed batch payload length estimation.
func constructBatchPayload(chunks []*encoding.Chunk) ([]byte, error) {
    // metadata consists of num_chunks (2 bytes) and chunk_i_size (4 bytes per chunk); space is reserved for up to 45 chunks
    metadataLength := 2 + 45*4

    // the raw (un-compressed and un-padded) blob payload
    batchBytes := make([]byte, metadataLength)

    // batch metadata: num_chunks
    binary.BigEndian.PutUint16(batchBytes[0:], uint16(len(chunks)))

    // encode batch metadata and L2 transactions
    for chunkID, chunk := range chunks {
        currentChunkStartIndex := len(batchBytes)

        for _, block := range chunk.Blocks {
            for _, tx := range block.Transactions {
                if tx.Type == types.L1MessageTxType {
                    continue
                }

                // encode L2 txs into batch payload
                rlpTxData, err := encoding.ConvertTxDataToRLPEncoding(tx, false /* no mock */)
                if err != nil {
                    return nil, err
                }
                batchBytes = append(batchBytes, rlpTxData...)
            }
        }

        // batch metadata: chunk_i_size
        if chunkSize := len(batchBytes) - currentChunkStartIndex; chunkSize != 0 {
            binary.BigEndian.PutUint32(batchBytes[2+4*chunkID:], uint32(chunkSize))
        }
    }
    return batchBytes, nil
}

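A worked example of the layout constructBatchPayload produces, with hypothetical sizes (two chunks holding 100 and 250 bytes of RLP-encoded L2 transactions):

    // bytes [0:2)     num_chunks  = 0x0002
    // bytes [2:6)     chunk0 size = 0x00000064 (100)
    // bytes [6:10)    chunk1 size = 0x000000fa (250)
    // bytes [10:182)  zeros: slots reserved for the remaining 43 of 45 chunks
    // bytes [182:282) chunk0 tx payload, appended after the metadata block
    // bytes [282:532) chunk1 tx payload
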
// compressScrollBatchBytes compresses the given batch of bytes.
// The output buffer is allocated with an extra 128 bytes to accommodate metadata overhead or an error message.
func compressScrollBatchBytes(batchBytes []byte) ([]byte, error) {
    srcSize := C.uint64_t(len(batchBytes))
    outbufSize := C.uint64_t(len(batchBytes) + 128) // allocate output buffer with extra 128 bytes
    outbuf := make([]byte, outbufSize)

    if err := C.compress_scroll_batch_bytes((*C.uchar)(unsafe.Pointer(&batchBytes[0])), srcSize,
        (*C.uchar)(unsafe.Pointer(&outbuf[0])), &outbufSize); err != nil {
        return nil, fmt.Errorf("failed to compress scroll batch bytes: %s", C.GoString(err))
    }

    return outbuf[:int(outbufSize)], nil
}
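A usage sketch for the script (indices and environment illustrative): with libscroll_zstd.so on the linker and runtime search paths, for example via the CGO_LDFLAGS and LD_LIBRARY_PATH exports in rollup/run_test.sh above, build it with go build and pass a batch range, e.g. ./main 100 200. It reads batches from the hard-coded local Postgres DSN, logs one compression-ratio line per batch, and finishes with the average over the range.
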
@@ -40,8 +40,8 @@ var (
    l1Client *ethclient.Client
    l2Client *ethclient.Client

    // l1Auth
    l1Auth *bind.TransactOpts
    l2Auth *bind.TransactOpts
)

func setupDB(t *testing.T) *gorm.DB {
@@ -82,7 +82,6 @@ func setupEnv(t *testing.T) {
    var (
        err           error
        l1GethChainID *big.Int
        l2GethChainID *big.Int
    )

    testApps = tc.NewTestcontainerApps()
@@ -97,8 +96,6 @@ func setupEnv(t *testing.T) {
    assert.NoError(t, err)
    l1GethChainID, err = l1Client.ChainID(context.Background())
    assert.NoError(t, err)
    l2GethChainID, err = l2Client.ChainID(context.Background())
    assert.NoError(t, err)

    l1Cfg, l2Cfg := rollupApp.Config.L1Config, rollupApp.Config.L2Config
    l1Cfg.Confirmations = 0
@@ -106,16 +103,17 @@ func setupEnv(t *testing.T) {
    l2Cfg.Confirmations = 0
    l2Cfg.RelayerConfig.SenderConfig.Confirmations = 0

    l1Auth, err = bind.NewKeyedTransactorWithChainID(l2Cfg.RelayerConfig.CommitSenderPrivateKey, l1GethChainID)
    l1Auth, err = bind.NewKeyedTransactorWithChainID(rollupApp.Config.L2Config.RelayerConfig.CommitSenderPrivateKey, l1GethChainID)
    assert.NoError(t, err)

    l2Auth, err = bind.NewKeyedTransactorWithChainID(l1Cfg.RelayerConfig.GasOracleSenderPrivateKey, l2GethChainID)
    rollupApp.Config.L1Config.Endpoint, err = testApps.GetPoSL1EndPoint()
    assert.NoError(t, err)
    rollupApp.Config.L2Config.RelayerConfig.SenderConfig.Endpoint, err = testApps.GetPoSL1EndPoint()
    assert.NoError(t, err)

    port, err := rand.Int(rand.Reader, big.NewInt(10000))
    assert.NoError(t, err)
    svrPort := strconv.FormatInt(port.Int64()+40000, 10)
    l2Cfg.RelayerConfig.ChainMonitor.BaseURL = "http://localhost:" + svrPort
    rollupApp.Config.L2Config.RelayerConfig.ChainMonitor.BaseURL = "http://localhost:" + svrPort
}

func mockChainMonitorServer(baseURL string) (*http.Server, error) {
@@ -139,7 +137,7 @@ func prepareContracts(t *testing.T) {
    // L1 ScrollChain contract
    nonce, err := l1Client.PendingNonceAt(context.Background(), l1Auth.From)
    assert.NoError(t, err)
    mockL1ContractAddress := crypto.CreateAddress(l1Auth.From, nonce)
    scrollChainAddress := crypto.CreateAddress(l1Auth.From, nonce)
    tx := types.NewContractCreation(nonce, big.NewInt(0), 10000000, big.NewInt(1000000000), common.FromHex(mock_bridge.MockBridgeMetaData.Bin))
    signedTx, err := l1Auth.Signer(l1Auth.From, tx)
    assert.NoError(t, err)
@@ -147,51 +145,23 @@ func prepareContracts(t *testing.T) {
    assert.NoError(t, err)

    assert.Eventually(t, func() bool {
        _, isPending, getErr := l1Client.TransactionByHash(context.Background(), signedTx.Hash())
        return getErr == nil && !isPending
    }, 30*time.Second, time.Second)

    assert.Eventually(t, func() bool {
        receipt, getErr := l1Client.TransactionReceipt(context.Background(), signedTx.Hash())
        return getErr == nil && receipt.Status == gethTypes.ReceiptStatusSuccessful
    }, 30*time.Second, time.Second)

    assert.Eventually(t, func() bool {
        code, getErr := l1Client.CodeAt(context.Background(), mockL1ContractAddress, nil)
        return getErr == nil && len(code) > 0
    }, 30*time.Second, time.Second)

    // L2 ScrollChain contract
    nonce, err = l2Client.PendingNonceAt(context.Background(), l2Auth.From)
    assert.NoError(t, err)
    mockL2ContractAddress := crypto.CreateAddress(l2Auth.From, nonce)
    tx = types.NewContractCreation(nonce, big.NewInt(0), 2000000, big.NewInt(1000000000), common.FromHex(mock_bridge.MockBridgeMetaData.Bin))
    signedTx, err = l2Auth.Signer(l2Auth.From, tx)
    assert.NoError(t, err)
    err = l2Client.SendTransaction(context.Background(), signedTx)
    assert.NoError(t, err)

    assert.Eventually(t, func() bool {
        _, isPending, err := l2Client.TransactionByHash(context.Background(), signedTx.Hash())
        _, isPending, err := l1Client.TransactionByHash(context.Background(), signedTx.Hash())
        return err == nil && !isPending
    }, 30*time.Second, time.Second)

    assert.Eventually(t, func() bool {
        receipt, err := l2Client.TransactionReceipt(context.Background(), signedTx.Hash())
        receipt, err := l1Client.TransactionReceipt(context.Background(), signedTx.Hash())
        return err == nil && receipt.Status == gethTypes.ReceiptStatusSuccessful
    }, 30*time.Second, time.Second)

    assert.Eventually(t, func() bool {
        code, err := l2Client.CodeAt(context.Background(), mockL2ContractAddress, nil)
        code, err := l1Client.CodeAt(context.Background(), scrollChainAddress, nil)
        return err == nil && len(code) > 0
    }, 30*time.Second, time.Second)

    l1Config, l2Config := rollupApp.Config.L1Config, rollupApp.Config.L2Config
    l1Config.ScrollChainContractAddress = mockL1ContractAddress
    l2Config.RelayerConfig.RollupContractAddress = mockL1ContractAddress

    l2Config.RelayerConfig.GasPriceOracleContractAddress = mockL1ContractAddress
    l1Config.RelayerConfig.GasPriceOracleContractAddress = mockL2ContractAddress
    l1Config.ScrollChainContractAddress = scrollChainAddress
    l2Config.RelayerConfig.RollupContractAddress = scrollChainAddress
}

func TestFunction(t *testing.T) {
@@ -213,6 +183,5 @@ func TestFunction(t *testing.T) {

    // l1/l2 gas oracle
    t.Run("TestImportL1GasPrice", testImportL1GasPrice)
    t.Run("TestImportL1GasPriceAfterCurie", testImportL1GasPriceAfterCurie)
    t.Run("TestImportL2GasPrice", testImportL2GasPrice)
}

@@ -65,51 +65,6 @@ func testImportL1GasPrice(t *testing.T) {
    assert.Equal(t, types.GasOracleStatus(blocks[0].GasOracleStatus), types.GasOracleImporting)
}

func testImportL1GasPriceAfterCurie(t *testing.T) {
    db := setupDB(t)
    defer database.CloseDB(db)

    prepareContracts(t)

    l1Cfg := rollupApp.Config.L1Config

    // Create L1Relayer
    l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig, &params.ChainConfig{BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0)}, relayer.ServiceTypeL1GasOracle, nil)
    assert.NoError(t, err)
    defer l1Relayer.StopSenders()

    // Create L1Watcher
    startHeight, err := l1Client.BlockNumber(context.Background())
    assert.NoError(t, err)
    l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, startHeight-1, 0, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db, nil)

    // fetch new blocks
    number, err := l1Client.BlockNumber(context.Background())
    assert.Greater(t, number, startHeight-1)
    assert.NoError(t, err)
    err = l1Watcher.FetchBlockHeader(number)
    assert.NoError(t, err)

    l1BlockOrm := orm.NewL1Block(db)
    // check db status
    latestBlockHeight, err := l1BlockOrm.GetLatestL1BlockHeight(context.Background())
    assert.NoError(t, err)
    assert.Equal(t, number, latestBlockHeight)
    blocks, err := l1BlockOrm.GetL1Blocks(context.Background(), map[string]interface{}{"number": latestBlockHeight})
    assert.NoError(t, err)
    assert.Equal(t, len(blocks), 1)
    assert.Empty(t, blocks[0].OracleTxHash)
    assert.Equal(t, types.GasOracleStatus(blocks[0].GasOracleStatus), types.GasOraclePending)

    // relay gas price
    l1Relayer.ProcessGasPriceOracle()
    blocks, err = l1BlockOrm.GetL1Blocks(context.Background(), map[string]interface{}{"number": latestBlockHeight})
    assert.NoError(t, err)
    assert.Equal(t, len(blocks), 1)
    assert.NotEmpty(t, blocks[0].OracleTxHash)
    assert.Equal(t, types.GasOracleStatus(blocks[0].GasOracleStatus), types.GasOracleImporting)
}

func testImportL2GasPrice(t *testing.T) {
    db := setupDB(t)
    defer database.CloseDB(db)

@@ -2,7 +2,6 @@ package tests

import (
    "context"
    "math"
    "math/big"
    "testing"
    "time"
@@ -230,14 +229,12 @@ func testCommitBatchAndFinalizeBatch4844(t *testing.T) {
    MaxL1CommitCalldataSizePerChunk: 100000,
    MaxRowConsumptionPerChunk:       1048319,
    ChunkTimeoutSec:                 300,
    MaxUncompressedBatchBytesSize:   math.MaxUint64,
}, chainConfig, db, nil)

bp := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{
    MaxL1CommitGasPerBatch:          1,
    MaxL1CommitCalldataSizePerBatch: 100000,
    BatchTimeoutSec:                 300,
    MaxUncompressedBatchBytesSize:   math.MaxUint64,
}, chainConfig, db, nil)

cp.TryProposeChunk()
@@ -369,14 +366,12 @@ func testCommitBatchAndFinalizeBatchBeforeAndAfter4844(t *testing.T) {
    MaxL1CommitCalldataSizePerChunk: 1000000,
    MaxRowConsumptionPerChunk:       1048319,
    ChunkTimeoutSec:                 300,
    MaxUncompressedBatchBytesSize:   math.MaxUint64,
}, chainConfig, db, nil)

bp := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{
    MaxL1CommitGasPerBatch:          50000000000,
    MaxL1CommitCalldataSizePerBatch: 1000000,
    BatchTimeoutSec:                 300,
    MaxUncompressedBatchBytesSize:   math.MaxUint64,
}, chainConfig, db, nil)

cp.TryProposeChunk()
@@ -498,14 +493,12 @@ func testCommitBatchAndFinalizeBatchBeforeAndAfterCompression(t *testing.T) {
    MaxL1CommitCalldataSizePerChunk: 1000000,
    MaxRowConsumptionPerChunk:       1048319,
    ChunkTimeoutSec:                 300,
    MaxUncompressedBatchBytesSize:   math.MaxUint64,
}, chainConfig, db, nil)

bp := watcher.NewBatchProposer(context.Background(), &config.BatchProposerConfig{
    MaxL1CommitGasPerBatch:          50000000000,
    MaxL1CommitCalldataSizePerBatch: 1000000,
    BatchTimeoutSec:                 300,
    MaxUncompressedBatchBytesSize:   math.MaxUint64,
}, chainConfig, db, nil)

cp.TryProposeChunk()

@@ -177,7 +177,7 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, dbTX ...
    if len(dbTX) > 0 && dbTX[0] != nil {
        db = dbTX[0]
    }
    db = db.WithContext(ctx)
    db.WithContext(ctx)
    db = db.Model(&Batch{})

    if err := db.Create(&newBatch).Error; err != nil {