Compare commits

...

10 Commits

Author SHA1 Message Date
georgehao
a5c0dace15 feat: reduce the fetch limit size 2023-11-15 17:53:35 +08:00
Steven
4aa5d5cd37 fix (prover): close file handles used in prover (#1007)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-11-07 16:50:43 +08:00
colin
486c7ee0f9 refactor(rollup-relayer): tweak finalize batch logs (#1009)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2023-11-06 17:55:18 +08:00
colin
0243c86b3c feat(rollup-relayer): auto finalize batch when timeout in test env (#1008)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-11-06 14:53:58 +08:00
Xi Lin
e6db4ac3a8 feat(contracts): make sure prover and sequencer is EOA (#1002) 2023-11-03 12:21:20 -07:00
georgehao
50040a164e feat(coordinator): split the coordinator cron to single process (#995)
Co-authored-by: maskpp <maskpp266@gmail.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <HAOYUatHZ@users.noreply.github.com>
2023-10-26 13:15:44 +08:00
David Cardenas
8494ab1899 docs: Update LICENSE (#993)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-10-25 11:19:28 +08:00
Steven
2102e16fdb fix (libzkp): free Rust CString by from_raw (#998) 2023-10-24 10:44:54 +08:00
colin
c2ab4bf16d feat(prover): add failure retry on rpc error (#994)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2023-10-19 14:01:33 +08:00
HAOYUatHZ
1059f9d3f8 fix(prover): free cgo pointers after use (#990)
Co-authored-by: HAOYUatHZ <HAOYUatHZ@users.noreply.github.com>
2023-10-17 22:15:27 +08:00
39 changed files with 524 additions and 195 deletions

View File

@@ -111,7 +111,7 @@ jobs:
tags: scrolltech/bridgehistoryapi-server:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
coordinator:
coordinator-api:
runs-on: ubuntu-latest
steps:
- name: Checkout code
@@ -127,8 +127,29 @@ jobs:
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/coordinator.Dockerfile
file: ./build/dockerfiles/coordinator-api.Dockerfile
push: true
tags: scrolltech/coordinator:${{github.ref_name}}
tags: scrolltech/coordinator-api:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
coordinator-cron:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push coordinator docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/coordinator-cron.Dockerfile
push: true
tags: scrolltech/coordinator-cron:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}

View File

@@ -1,6 +1,6 @@
MIT License
Copyright (c) 2022 Scroll
Copyright (c) 2022-2023 Scroll
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal

View File

@@ -18,7 +18,7 @@ import (
// Todo : read from config
var (
// the number of blocks fetch per round
fetchLimit = uint64(3000)
fetchLimit = uint64(500)
)
// FetchAndSave is a function type that fetches events from blockchain and saves them to database

View File

@@ -36,7 +36,7 @@ COPY . .
RUN cp -r ./common/libzkp/interface ./coordinator/internal/logic/verifier/lib
COPY --from=zkp-builder /app/target/release/libzkp.so ./coordinator/internal/logic/verifier/lib/
COPY --from=zkp-builder /app/target/release/libzktrie.so ./coordinator/internal/logic/verifier/lib/
RUN cd ./coordinator && make coordinator_skip_libzkp && mv ./build/bin/coordinator /bin/coordinator && mv internal/logic/verifier/lib /bin/
RUN cd ./coordinator && make coordinator_api_skip_libzkp && mv ./build/bin/coordinator_api /bin/coordinator_api && mv internal/logic/verifier/lib /bin/
# Pull coordinator into a second stage deploy alpine container
FROM ubuntu:20.04
@@ -44,7 +44,7 @@ ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/src/coordinator/internal/logic/verifier/li
# ENV CHAIN_ID=534353
RUN mkdir -p /src/coordinator/internal/logic/verifier/lib
COPY --from=builder /bin/lib /src/coordinator/internal/logic/verifier/lib
COPY --from=builder /bin/coordinator /bin/
RUN /bin/coordinator --version
COPY --from=builder /bin/coordinator_api /bin/
RUN /bin/coordinator_api --version
ENTRYPOINT ["/bin/coordinator"]
ENTRYPOINT ["/bin/coordinator_api"]

View File

@@ -0,0 +1,25 @@
# Download Go dependencies
FROM scrolltech/go-alpine-builder:1.19 as base
WORKDIR /src
COPY go.work* ./
COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./prover/go.* ./prover/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x
# Build coordinator
FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/coordinator/cmd/cron/ && go build -v -p 4 -o /bin/coordinator_cron
# Pull coordinator into a second stage deploy alpine container
FROM alpine:latest
COPY --from=builder /bin/coordinator_cron /bin/
ENTRYPOINT ["coordinator_cron"]

View File

@@ -0,0 +1,6 @@
assets/
contracts/
docs/
l2geth/
rpc-gateway/
*target/*

View File

@@ -11,6 +11,19 @@ use std::{
pub(crate) static OUTPUT_DIR: Lazy<Option<String>> =
Lazy::new(|| env::var("PROVER_OUTPUT_DIR").ok());
/// # Safety
#[no_mangle]
pub extern "C" fn free_c_chars(ptr: *mut c_char) {
if ptr.is_null() {
log::warn!("Try to free an empty pointer!");
return;
}
unsafe {
let _ = CString::from_raw(ptr);
}
}
pub(crate) fn c_char_to_str(c: *const c_char) -> &'static str {
let cstr = unsafe { CStr::from_ptr(c) };
cstr.to_str().unwrap()

View File

@@ -12,3 +12,4 @@ char* gen_chunk_proof(char* block_traces);
char verify_chunk_proof(char* proof);
char* block_traces_to_chunk_info(char* block_traces);
void free_c_chars(char* ptr);

View File

@@ -24,8 +24,10 @@ var (
// DBCliApp the name of mock database app.
DBCliApp MockAppName = "db_cli-test"
// CoordinatorApp the name of mock coordinator app.
CoordinatorApp MockAppName = "coordinator-test"
// CoordinatorAPIApp the name of mock coordinator app.
CoordinatorAPIApp MockAppName = "coordinator-api-test"
// CoordinatorCronApp the name of mock coordinator cron app.
CoordinatorCronApp MockAppName = "coordinator-cron-test"
// ChunkProverApp the name of mock chunk prover app.
ChunkProverApp MockAppName = "chunkProver-test"

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v4.3.35"
var tag = "v4.3.40"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {

View File

@@ -360,6 +360,10 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
/// @notice Add an account to the sequencer list.
/// @param _account The address of account to add.
function addSequencer(address _account) external onlyOwner {
// @note Currently many external services rely on EOA sequencer to decode metadata directly from tx.calldata.
// So we explicitly make sure the account is EOA.
require(_account.code.length == 0, "not EOA");
isSequencer[_account] = true;
emit UpdateSequencer(_account, true);
@@ -376,6 +380,9 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
/// @notice Add an account to the prover list.
/// @param _account The address of account to add.
function addProver(address _account) external onlyOwner {
// @note Currently many external services rely on EOA prover to decode metadata directly from tx.calldata.
// So we explicitly make sure the account is EOA.
require(_account.code.length == 0, "not EOA");
isProver[_account] = true;
emit UpdateProver(_account, true);

View File

@@ -106,8 +106,8 @@ abstract contract L1GatewayTestBase is DSTestPlus {
}
function prepareL2MessageRoot(bytes32 messageHash) internal {
rollup.addSequencer(address(this));
rollup.addProver(address(this));
rollup.addSequencer(address(0));
rollup.addProver(address(0));
// import genesis batch
bytes memory batchHeader0 = new bytes(89);
@@ -122,7 +122,9 @@ abstract contract L1GatewayTestBase is DSTestPlus {
bytes memory chunk0 = new bytes(1 + 60);
chunk0[0] = bytes1(uint8(1)); // one block in this chunk
chunks[0] = chunk0;
hevm.startPrank(address(0));
rollup.commitBatch(0, batchHeader0, chunks, new bytes(0));
hevm.stopPrank();
bytes memory batchHeader1 = new bytes(89);
assembly {
@@ -134,6 +136,7 @@ abstract contract L1GatewayTestBase is DSTestPlus {
mstore(add(batchHeader1, add(0x20, 57)), batchHash0) // parentBatchHash
}
hevm.startPrank(address(0));
rollup.finalizeBatchWithProof(
batchHeader1,
bytes32(uint256(1)),
@@ -141,5 +144,6 @@ abstract contract L1GatewayTestBase is DSTestPlus {
messageHash,
new bytes(0)
);
hevm.stopPrank();
}
}

View File

@@ -67,30 +67,40 @@ contract ScrollChainTest is DSTestPlus {
hevm.expectRevert("caller not sequencer");
rollup.commitBatch(0, batchHeader0, new bytes[](0), new bytes(0));
rollup.addSequencer(address(this));
rollup.addSequencer(address(0));
// invalid version, revert
hevm.startPrank(address(0));
hevm.expectRevert("invalid version");
rollup.commitBatch(1, batchHeader0, new bytes[](0), new bytes(0));
hevm.stopPrank();
// batch is empty, revert
hevm.startPrank(address(0));
hevm.expectRevert("batch is empty");
rollup.commitBatch(0, batchHeader0, new bytes[](0), new bytes(0));
hevm.stopPrank();
// batch header length too small, revert
hevm.startPrank(address(0));
hevm.expectRevert("batch header length too small");
rollup.commitBatch(0, new bytes(88), new bytes[](1), new bytes(0));
hevm.stopPrank();
// wrong bitmap length, revert
hevm.startPrank(address(0));
hevm.expectRevert("wrong bitmap length");
rollup.commitBatch(0, new bytes(90), new bytes[](1), new bytes(0));
hevm.stopPrank();
// incorrect parent batch hash, revert
assembly {
mstore(add(batchHeader0, add(0x20, 25)), 2) // change data hash for batch0
}
hevm.startPrank(address(0));
hevm.expectRevert("incorrect parent batch hash");
rollup.commitBatch(0, batchHeader0, new bytes[](1), new bytes(0));
hevm.stopPrank();
assembly {
mstore(add(batchHeader0, add(0x20, 25)), 1) // change back
}
@@ -101,15 +111,19 @@ contract ScrollChainTest is DSTestPlus {
// no block in chunk, revert
chunk0 = new bytes(1);
chunks[0] = chunk0;
hevm.startPrank(address(0));
hevm.expectRevert("no block in chunk");
rollup.commitBatch(0, batchHeader0, chunks, new bytes(0));
hevm.stopPrank();
// invalid chunk length, revert
chunk0 = new bytes(1);
chunk0[0] = bytes1(uint8(1)); // one block in this chunk
chunks[0] = chunk0;
hevm.startPrank(address(0));
hevm.expectRevert("invalid chunk length");
rollup.commitBatch(0, batchHeader0, chunks, new bytes(0));
hevm.stopPrank();
// cannot skip last L1 message, revert
chunk0 = new bytes(1 + 60);
@@ -119,8 +133,10 @@ contract ScrollChainTest is DSTestPlus {
chunk0[60] = bytes1(uint8(1)); // numL1Messages = 1
bitmap[31] = bytes1(uint8(1));
chunks[0] = chunk0;
hevm.startPrank(address(0));
hevm.expectRevert("cannot skip last L1 message");
rollup.commitBatch(0, batchHeader0, chunks, bitmap);
hevm.stopPrank();
// num txs less than num L1 msgs, revert
chunk0 = new bytes(1 + 60);
@@ -130,26 +146,34 @@ contract ScrollChainTest is DSTestPlus {
chunk0[60] = bytes1(uint8(3)); // numL1Messages = 3
bitmap[31] = bytes1(uint8(3));
chunks[0] = chunk0;
hevm.startPrank(address(0));
hevm.expectRevert("num txs less than num L1 msgs");
rollup.commitBatch(0, batchHeader0, chunks, bitmap);
hevm.stopPrank();
// incomplete l2 transaction data, revert
chunk0 = new bytes(1 + 60 + 1);
chunk0[0] = bytes1(uint8(1)); // one block in this chunk
chunks[0] = chunk0;
hevm.startPrank(address(0));
hevm.expectRevert("incomplete l2 transaction data");
rollup.commitBatch(0, batchHeader0, chunks, new bytes(0));
hevm.stopPrank();
// commit batch with one chunk, no tx, correctly
chunk0 = new bytes(1 + 60);
chunk0[0] = bytes1(uint8(1)); // one block in this chunk
chunks[0] = chunk0;
hevm.startPrank(address(0));
rollup.commitBatch(0, batchHeader0, chunks, new bytes(0));
hevm.stopPrank();
assertGt(uint256(rollup.committedBatches(1)), 0);
// batch is already committed, revert
hevm.startPrank(address(0));
hevm.expectRevert("batch already committed");
rollup.commitBatch(0, batchHeader0, chunks, new bytes(0));
hevm.stopPrank();
}
function testFinalizeBatchWithProof() public {
@@ -157,8 +181,8 @@ contract ScrollChainTest is DSTestPlus {
hevm.expectRevert("caller not prover");
rollup.finalizeBatchWithProof(new bytes(0), bytes32(0), bytes32(0), bytes32(0), new bytes(0));
rollup.addProver(address(this));
rollup.addSequencer(address(this));
rollup.addProver(address(0));
rollup.addSequencer(address(0));
bytes memory batchHeader0 = new bytes(89);
@@ -176,7 +200,9 @@ contract ScrollChainTest is DSTestPlus {
chunk0 = new bytes(1 + 60);
chunk0[0] = bytes1(uint8(1)); // one block in this chunk
chunks[0] = chunk0;
hevm.startPrank(address(0));
rollup.commitBatch(0, batchHeader0, chunks, new bytes(0));
hevm.stopPrank();
assertGt(uint256(rollup.committedBatches(1)), 0);
bytes memory batchHeader1 = new bytes(89);
@@ -190,12 +216,15 @@ contract ScrollChainTest is DSTestPlus {
}
// incorrect batch hash, revert
hevm.expectRevert("incorrect batch hash");
batchHeader1[0] = bytes1(uint8(1)); // change version to 1
hevm.startPrank(address(0));
hevm.expectRevert("incorrect batch hash");
rollup.finalizeBatchWithProof(batchHeader1, bytes32(uint256(1)), bytes32(uint256(2)), bytes32(0), new bytes(0));
hevm.stopPrank();
batchHeader1[0] = bytes1(uint8(0)); // change back
// batch header length too small, revert
hevm.startPrank(address(0));
hevm.expectRevert("batch header length too small");
rollup.finalizeBatchWithProof(
new bytes(88),
@@ -204,8 +233,10 @@ contract ScrollChainTest is DSTestPlus {
bytes32(0),
new bytes(0)
);
hevm.stopPrank();
// wrong bitmap length, revert
hevm.startPrank(address(0));
hevm.expectRevert("wrong bitmap length");
rollup.finalizeBatchWithProof(
new bytes(90),
@@ -214,13 +245,17 @@ contract ScrollChainTest is DSTestPlus {
bytes32(0),
new bytes(0)
);
hevm.stopPrank();
// incorrect previous state root, revert
hevm.startPrank(address(0));
hevm.expectRevert("incorrect previous state root");
rollup.finalizeBatchWithProof(batchHeader1, bytes32(uint256(2)), bytes32(uint256(2)), bytes32(0), new bytes(0));
hevm.stopPrank();
// verify success
assertBoolEq(rollup.isBatchFinalized(1), false);
hevm.startPrank(address(0));
rollup.finalizeBatchWithProof(
batchHeader1,
bytes32(uint256(1)),
@@ -228,12 +263,14 @@ contract ScrollChainTest is DSTestPlus {
bytes32(uint256(3)),
new bytes(0)
);
hevm.stopPrank();
assertBoolEq(rollup.isBatchFinalized(1), true);
assertEq(rollup.finalizedStateRoots(1), bytes32(uint256(2)));
assertEq(rollup.withdrawRoots(1), bytes32(uint256(3)));
assertEq(rollup.lastFinalizedBatchIndex(), 1);
// batch already verified, revert
hevm.startPrank(address(0));
hevm.expectRevert("batch already verified");
rollup.finalizeBatchWithProof(
batchHeader1,
@@ -242,11 +279,12 @@ contract ScrollChainTest is DSTestPlus {
bytes32(uint256(3)),
new bytes(0)
);
hevm.stopPrank();
}
function testCommitAndFinalizeWithL1Messages() public {
rollup.addSequencer(address(this));
rollup.addProver(address(this));
rollup.addSequencer(address(0));
rollup.addProver(address(0));
// import 300 L1 messages
for (uint256 i = 0; i < 300; i++) {
@@ -307,14 +345,17 @@ contract ScrollChainTest is DSTestPlus {
chunks = new bytes[](1);
chunks[0] = chunk0;
bitmap = new bytes(32);
hevm.startPrank(address(0));
hevm.expectEmit(true, true, false, true);
emit CommitBatch(1, bytes32(0x00847173b29b238cf319cde79512b7c213e5a8b4138daa7051914c4592b6dfc7));
rollup.commitBatch(0, batchHeader0, chunks, bitmap);
hevm.stopPrank();
assertBoolEq(rollup.isBatchFinalized(1), false);
bytes32 batchHash1 = rollup.committedBatches(1);
assertEq(batchHash1, bytes32(0x00847173b29b238cf319cde79512b7c213e5a8b4138daa7051914c4592b6dfc7));
// finalize batch1
hevm.startPrank(address(0));
hevm.expectEmit(true, true, false, true);
emit FinalizeBatch(1, batchHash1, bytes32(uint256(2)), bytes32(uint256(3)));
rollup.finalizeBatchWithProof(
@@ -324,6 +365,7 @@ contract ScrollChainTest is DSTestPlus {
bytes32(uint256(3)),
new bytes(0)
);
hevm.stopPrank();
assertBoolEq(rollup.isBatchFinalized(1), true);
assertEq(rollup.finalizedStateRoots(1), bytes32(uint256(2)));
assertEq(rollup.withdrawRoots(1), bytes32(uint256(3)));
@@ -431,21 +473,28 @@ contract ScrollChainTest is DSTestPlus {
// too many txs in one chunk, revert
rollup.updateMaxNumTxInChunk(2); // 3 - 1
hevm.startPrank(address(0));
hevm.expectRevert("too many txs in one chunk");
rollup.commitBatch(0, batchHeader1, chunks, bitmap); // first chunk with too many txs
hevm.stopPrank();
rollup.updateMaxNumTxInChunk(185); // 5+10+300 - 2 - 127
hevm.startPrank(address(0));
hevm.expectRevert("too many txs in one chunk");
rollup.commitBatch(0, batchHeader1, chunks, bitmap); // second chunk with too many txs
hevm.stopPrank();
rollup.updateMaxNumTxInChunk(186);
hevm.startPrank(address(0));
hevm.expectEmit(true, true, false, true);
emit CommitBatch(2, bytes32(0x03a9cdcb9d582251acf60937db006ec99f3505fd4751b7c1f92c9a8ef413e873));
rollup.commitBatch(0, batchHeader1, chunks, bitmap);
hevm.stopPrank();
assertBoolEq(rollup.isBatchFinalized(2), false);
bytes32 batchHash2 = rollup.committedBatches(2);
assertEq(batchHash2, bytes32(0x03a9cdcb9d582251acf60937db006ec99f3505fd4751b7c1f92c9a8ef413e873));
// verify committed batch correctly
hevm.startPrank(address(0));
hevm.expectEmit(true, true, false, true);
emit FinalizeBatch(2, batchHash2, bytes32(uint256(4)), bytes32(uint256(5)));
rollup.finalizeBatchWithProof(
@@ -455,6 +504,7 @@ contract ScrollChainTest is DSTestPlus {
bytes32(uint256(5)),
new bytes(0)
);
hevm.stopPrank();
assertBoolEq(rollup.isBatchFinalized(2), true);
assertEq(rollup.finalizedStateRoots(2), bytes32(uint256(4)));
assertEq(rollup.withdrawRoots(2), bytes32(uint256(5)));
@@ -489,7 +539,7 @@ contract ScrollChainTest is DSTestPlus {
rollup.revertBatch(new bytes(89), 1);
hevm.stopPrank();
rollup.addSequencer(address(this));
rollup.addSequencer(address(0));
bytes memory batchHeader0 = new bytes(89);
@@ -507,7 +557,9 @@ contract ScrollChainTest is DSTestPlus {
chunk0 = new bytes(1 + 60);
chunk0[0] = bytes1(uint8(1)); // one block in this chunk
chunks[0] = chunk0;
hevm.startPrank(address(0));
rollup.commitBatch(0, batchHeader0, chunks, new bytes(0));
hevm.stopPrank();
bytes memory batchHeader1 = new bytes(89);
assembly {
@@ -520,7 +572,9 @@ contract ScrollChainTest is DSTestPlus {
}
// commit another batch
hevm.startPrank(address(0));
rollup.commitBatch(0, batchHeader1, chunks, new bytes(0));
hevm.stopPrank();
// count must be nonzero, revert
hevm.expectRevert("count must be nonzero");
@@ -563,7 +617,11 @@ contract ScrollChainTest is DSTestPlus {
rollup.removeSequencer(_sequencer);
hevm.stopPrank();
// change to random operator
hevm.expectRevert("not EOA");
rollup.addSequencer(address(this));
hevm.assume(_sequencer.code.length == 0);
// change to random EOA operator
hevm.expectEmit(true, false, false, true);
emit UpdateSequencer(_sequencer, true);
@@ -586,7 +644,11 @@ contract ScrollChainTest is DSTestPlus {
rollup.removeProver(_prover);
hevm.stopPrank();
// change to random operator
hevm.expectRevert("not EOA");
rollup.addProver(address(this));
hevm.assume(_prover.code.length == 0);
// change to random EOA operator
hevm.expectEmit(true, false, false, true);
emit UpdateProver(_prover, true);
@@ -601,8 +663,8 @@ contract ScrollChainTest is DSTestPlus {
}
function testSetPause() external {
rollup.addSequencer(address(this));
rollup.addProver(address(this));
rollup.addSequencer(address(0));
rollup.addProver(address(0));
// not owner, revert
hevm.startPrank(address(1));
@@ -614,10 +676,12 @@ contract ScrollChainTest is DSTestPlus {
rollup.setPause(true);
assertBoolEq(true, rollup.paused());
hevm.startPrank(address(0));
hevm.expectRevert("Pausable: paused");
rollup.commitBatch(0, new bytes(0), new bytes[](0), new bytes(0));
hevm.expectRevert("Pausable: paused");
rollup.finalizeBatchWithProof(new bytes(0), bytes32(0), bytes32(0), bytes32(0), new bytes(0));
hevm.stopPrank();
// unpause
rollup.setPause(false);

View File

@@ -1,6 +1,5 @@
.PHONY: lint docker clean coordinator coordinator_skip_libzkp mock_coordinator
IMAGE_NAME=coordinator
IMAGE_VERSION=latest
REPO_ROOT_DIR=./..
@@ -22,14 +21,20 @@ libzkp:
rm -rf ./internal/logic/verifier/lib && cp -r ../common/libzkp/interface ./internal/logic/verifier/lib
find ../common | grep libzktrie.so | xargs -I{} cp {} ./internal/logic/verifier/lib
coordinator: libzkp ## Builds the Coordinator instance.
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator ./cmd
coordinator_api: libzkp ## Builds the Coordinator api instance.
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_api ./cmd/api
coordinator_skip_libzkp:
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator ./cmd
coordinator_cron:
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_cron ./cmd/cron
mock_coordinator: ## Builds the mocked Coordinator instance.
go build -tags="mock_prover mock_verifier" -o $(PWD)/build/bin/coordinator ./cmd
coordinator_api_skip_libzkp:
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_api ./cmd/api
mock_coordinator_api: ## Builds the mocked Coordinator instance.
go build -tags="mock_prover mock_verifier" -o $(PWD)/build/bin/coordinator_api ./cmd/api
mock_coordinator_cron: ## Builds the mocked Coordinator instance.
go build -tags="mock_prover mock_verifier" -o $(PWD)/build/bin/coordinator_cron ./cmd/cron
test-verifier: libzkp
go test -tags ffi -timeout 0 -v ./internal/logic/verifier
@@ -45,7 +50,9 @@ clean: ## Empty out the bin folder
@rm -rf build/bin
docker:
DOCKER_BUILDKIT=1 docker build -t scrolltech/${IMAGE_NAME}:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/coordinator.Dockerfile
DOCKER_BUILDKIT=1 docker build -t scrolltech/coordinator-api:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/coordinator-api.Dockerfile
DOCKER_BUILDKIT=1 docker build -t scrolltech/coordinator-cron:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/coordinator-cron.Dockerfile
docker_push:
docker push scrolltech/${IMAGE_NAME}:${IMAGE_VERSION}
docker push scrolltech/coordinator-api:${IMAGE_VERSION}
docker push scrolltech/coordinator-cron:${IMAGE_VERSION}

View File

@@ -12,7 +12,8 @@ See [monorepo prerequisites](../README.md#prerequisites).
```bash
make clean
make coordinator
make coordinator_api
make coordinator_cron
```
The built coordinator binary is in the `build/bin` directory.
@@ -44,13 +45,15 @@ The coordinator behavior can be configured using [`config.json`](config.json). C
* Using default ports and config.json:
```bash
./build/bin/coordinator --http
./build/bin/coordinator_api --http
./build/bin/coordinator_cron
```
* Using manually specified ports and config.json:
```bash
./build/bin/coordinator --config ./config.json --http --http.addr localhost --http.port 8390
./build/bin/coordinator_api --config ./config.json --http --http.addr localhost --http.port 8390
./build/bin/coordinator_cron --config ./config.json
```
* For other flags, refer to [`cmd/app/flags.go`](cmd/app/flags.go).
* For other flags, refer to [`cmd/api/app/flags.go`](cmd/api/app/flags.go).

View File

@@ -22,7 +22,6 @@ import (
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/controller/api"
"scroll-tech/coordinator/internal/controller/cron"
"scroll-tech/coordinator/internal/route"
)
@@ -41,7 +40,7 @@ func init() {
return utils.LogSetup(ctx)
}
// Register `coordinator-test` app for integration-test.
utils.RegisterSimulation(app, utils.CoordinatorApp)
utils.RegisterSimulation(app, utils.CoordinatorAPIApp)
}
func action(ctx *cli.Context) error {
@@ -51,28 +50,23 @@ func action(ctx *cli.Context) error {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
subCtx, cancel := context.WithCancel(ctx.Context)
db, err := database.InitDB(cfg.DB)
if err != nil {
log.Crit("failed to init db connection", "err", err)
}
registry := prometheus.DefaultRegisterer
observability.Server(ctx, db)
proofCollector := cron.NewCollector(subCtx, db, cfg, registry)
defer func() {
proofCollector.Stop()
cancel()
if err = database.CloseDB(db); err != nil {
log.Error("can not close db connection", "error", err)
}
}()
registry := prometheus.DefaultRegisterer
observability.Server(ctx, db)
apiSrv := apiServer(ctx, cfg, db, registry)
log.Info(
"coordinator start successfully",
"Start coordinator api successfully.",
"version", version.Version,
)

View File

@@ -10,7 +10,7 @@ import (
)
func TestRunCoordinator(t *testing.T) {
coordinator := cmd.NewCmd("coordinator-test", "--version")
coordinator := cmd.NewCmd("coordinator-api-test", "--version")
defer coordinator.WaitExit()
// wait result

View File

@@ -8,10 +8,6 @@ var (
&httpEnabledFlag,
&httpListenAddrFlag,
&httpPortFlag,
// ws flags
&wsEnabledFlag,
&wsListenAddrFlag,
&wsPortFlag,
}
// httpEnabledFlag enable rpc server.
httpEnabledFlag = cli.BoolFlag{
@@ -31,19 +27,4 @@ var (
Usage: "HTTP-RPC server listening port",
Value: 8390,
}
wsEnabledFlag = cli.BoolFlag{
Name: "ws",
Usage: "Enable the WS-RPC server",
}
wsListenAddrFlag = cli.StringFlag{
Name: "ws.addr",
Usage: "WS-RPC server listening interface",
Value: "localhost",
}
// websocket port
wsPortFlag = cli.IntFlag{
Name: "ws.port",
Usage: "WS-RPC server listening port",
Value: 8391,
}
)

View File

@@ -55,8 +55,8 @@ func NewCoordinatorApp(base *docker.App, file string) *CoordinatorApp {
// RunApp run coordinator-test child process by multi parameters.
func (c *CoordinatorApp) RunApp(t *testing.T, args ...string) {
c.AppAPI = cmd.NewCmd(string(utils.CoordinatorApp), append(c.args, args...)...)
c.AppAPI.RunApp(func() bool { return c.AppAPI.WaitResult(t, time.Second*20, "Start coordinator successfully") })
c.AppAPI = cmd.NewCmd(string(utils.CoordinatorAPIApp), append(c.args, args...)...)
c.AppAPI.RunApp(func() bool { return c.AppAPI.WaitResult(t, time.Second*20, "Start coordinator api successfully") })
}
// Free stop and release coordinator-test.

View File

@@ -0,0 +1,7 @@
package main
import "scroll-tech/coordinator/cmd/api/app"
func main() {
app.Run()
}

View File

@@ -0,0 +1,87 @@
package app
import (
"context"
"fmt"
"os"
"os/signal"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/common/database"
"scroll-tech/common/observability"
"scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/coordinator/internal/config"
"scroll-tech/coordinator/internal/controller/cron"
)
var app *cli.App
func init() {
// Set up coordinator app info.
app = cli.NewApp()
app.Action = action
app.Name = "coordinator cron"
app.Usage = "The Scroll L2 Coordinator cron"
app.Version = version.Version
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx)
}
// Register `coordinator-cron-test` app for integration-cron-test.
utils.RegisterSimulation(app, utils.CoordinatorCronApp)
}
func action(ctx *cli.Context) error {
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
subCtx, cancel := context.WithCancel(ctx.Context)
db, err := database.InitDB(cfg.DB)
if err != nil {
log.Crit("failed to init db connection", "err", err)
}
registry := prometheus.DefaultRegisterer
observability.Server(ctx, db)
proofCollector := cron.NewCollector(subCtx, db, cfg, registry)
defer func() {
proofCollector.Stop()
cancel()
if err = database.CloseDB(db); err != nil {
log.Error("can not close db connection", "error", err)
}
}()
log.Info(
"coordinator cron start successfully",
"version", version.Version,
)
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
log.Info("coordinator cron exiting success")
return nil
}
// Run coordinator.
func Run() {
// RunApp the coordinator.
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}

View File

@@ -0,0 +1,19 @@
package app
import (
"fmt"
"testing"
"time"
"scroll-tech/common/cmd"
"scroll-tech/common/version"
)
func TestRunCoordinatorCron(t *testing.T) {
coordinator := cmd.NewCmd("coordinator-cron-test", "--version")
defer coordinator.WaitExit()
// wait result
coordinator.ExpectWithTimeout(t, true, time.Second*3, fmt.Sprintf("coordinator cron version %s", version.Version))
coordinator.RunApp(nil)
}

View File

@@ -0,0 +1,7 @@
package main
import "scroll-tech/coordinator/cmd/cron/app"
func main() {
app.Run()
}

View File

@@ -1,7 +0,0 @@
package main
import "scroll-tech/coordinator/cmd/app"
func main() {
app.Run()
}

View File

@@ -76,7 +76,7 @@ func NewCollector(ctx context.Context, db *gorm.DB, cfg *config.Config, reg prom
go c.checkBatchAllChunkReady()
go c.cleanupChallenge()
log.Info("Start coordinator successfully.")
log.Info("Start coordinator cron successfully.")
return c
}

View File

@@ -4,7 +4,6 @@ import (
"context"
"crypto/ecdsa"
"fmt"
"net/http"
"sync"
"time"
@@ -35,13 +34,13 @@ func NewCoordinatorClient(cfg *config.CoordinatorConfig, proverName string, priv
SetRetryCount(cfg.RetryCount).
SetRetryWaitTime(time.Duration(cfg.RetryWaitTimeSec) * time.Second).
SetBaseURL(cfg.BaseURL).
AddRetryCondition(func(r *resty.Response, _ error) bool {
// Check for HTTP 5xx errors, e.g., coordinator is restarting.
if r.StatusCode() >= http.StatusInternalServerError {
log.Warn("Received unexpected HTTP response. Retrying...", "status code", r.StatusCode())
AddRetryAfterErrorCondition().
AddRetryCondition(func(response *resty.Response, err error) bool {
if err != nil {
log.Warn("Encountered an error while sending the request. Retrying...", "error", err)
return true
}
return false
return response.IsError()
})
log.Info("successfully initialized prover client",

View File

@@ -49,6 +49,7 @@ func NewProverCore(cfg *config.ProverCoreConfig) (*ProverCore, error) {
C.init_chunk_prover(paramsPathStr, assetsPathStr)
rawVK = C.get_chunk_vk()
}
defer C.free_c_chars(rawVK)
if rawVK != nil {
vk = C.GoString(rawVK)
@@ -161,7 +162,7 @@ func (p *ProverCore) checkChunkProofs(chunkProofsByt []byte) (bool, error) {
log.Info("Start to check chunk proofs ...")
cResult := C.check_chunk_proofs(chunkProofsStr)
defer C.free(unsafe.Pointer(cResult))
defer C.free_c_chars(cResult)
log.Info("Finish checking chunk proofs!")
var result CheckChunkProofsResponse
@@ -188,7 +189,7 @@ func (p *ProverCore) proveBatch(chunkInfosByt []byte, chunkProofsByt []byte) ([]
log.Info("Start to create batch proof ...")
bResult := C.gen_batch_proof(chunkInfosStr, chunkProofsStr)
defer C.free(unsafe.Pointer(bResult))
defer C.free_c_chars(bResult)
log.Info("Finish creating batch proof!")
var result ProofResult
@@ -210,7 +211,7 @@ func (p *ProverCore) proveChunk(tracesByt []byte) ([]byte, error) {
log.Info("Start to create chunk proof ...")
cProof := C.gen_chunk_proof(tracesStr)
defer C.free(unsafe.Pointer(cProof))
defer C.free_c_chars(cProof)
log.Info("Finish creating chunk proof!")
var result ProofResult
@@ -235,6 +236,11 @@ func (p *ProverCore) mayDumpProof(id string, proofByt []byte) error {
if err != nil {
return err
}
defer func() {
if err = f.Close(); err != nil {
log.Error("failed to close proof dump file", "id", id, "error", err)
}
}()
log.Info("Saving proof", "task-id", id)
_, err = f.Write(proofByt)
return err
@@ -242,12 +248,10 @@ func (p *ProverCore) mayDumpProof(id string, proofByt []byte) error {
func (p *ProverCore) tracesToChunkInfo(tracesByt []byte) []byte {
tracesStr := C.CString(string(tracesByt))
defer func() {
C.free(unsafe.Pointer(tracesStr))
}()
defer C.free(unsafe.Pointer(tracesStr))
cChunkInfo := C.block_traces_to_chunk_info(tracesStr)
defer C.free_c_chars(cChunkInfo)
chunkInfo := C.GoString(cChunkInfo)
return []byte(chunkInfo)

View File

@@ -92,6 +92,7 @@ func TestFFI(t *testing.T) {
func readChunkTrace(filePat string, as *assert.Assertions) []*types.BlockTrace {
f, err := os.Open(filePat)
as.NoError(err)
defer as.NoError(f.Close())
byt, err := io.ReadAll(f)
as.NoError(err)
@@ -104,6 +105,7 @@ func readChunkTrace(filePat string, as *assert.Assertions) []*types.BlockTrace {
func readVk(filePat string, as *assert.Assertions) string {
f, err := os.Open(filePat)
as.NoError(err)
defer as.NoError(f.Close())
byt, err := io.ReadAll(f)
as.NoError(err)

File diff suppressed because one or more lines are too long

View File

@@ -24,7 +24,6 @@
"min_gas_price": 0,
"gas_price_diff": 50000
},
"finalize_batch_interval_sec": 0,
"gas_oracle_sender_private_key": "1313131313131313131313131313131313131313131313131313131313131313"
}
},
@@ -59,7 +58,8 @@
"try_times": 5,
"base_url": "http://localhost:8750"
},
"finalize_batch_interval_sec": 0,
"enable_test_env_bypass_features": true,
"finalize_batch_without_proof_timeout_sec": 7200,
"gas_oracle_sender_private_key": "1313131313131313131313131313131313131313131313131313131313131313",
"commit_sender_private_key": "1414141414141414141414141414141414141414141414141414141414141414",
"finalize_sender_private_key": "1515151515151515151515151515151515151515151515151515151515151515",

View File

@@ -64,6 +64,11 @@ type RelayerConfig struct {
GasOracleSenderPrivateKey *ecdsa.PrivateKey `json:"-"`
CommitSenderPrivateKey *ecdsa.PrivateKey `json:"-"`
FinalizeSenderPrivateKey *ecdsa.PrivateKey `json:"-"`
// Indicates if bypass features specific to testing environments are enabled.
EnableTestEnvBypassFeatures bool `json:"enable_test_env_bypass_features"`
// The timeout in seconds for finalizing a batch without proof, only used when EnableTestEnvBypassFeatures is true.
FinalizeBatchWithoutProofTimeoutSec uint64 `json:"finalize_batch_without_proof_timeout_sec"`
}
// GasOracleConfig The config for updating gas price oracle.

View File

@@ -50,6 +50,11 @@ func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfi
return nil, fmt.Errorf("new gas oracle sender failed for address %s, err: %v", addr.Hex(), err)
}
// Ensure test features aren't enabled on the mainnet.
if gasOracleSender.GetChainID() == big.NewInt(1) && cfg.EnableTestEnvBypassFeatures {
return nil, fmt.Errorf("cannot enable test env features in mainnet")
}
var minGasPrice uint64
var gasPriceDiff uint64
if cfg.GasOracleConfig != nil {

View File

@@ -19,6 +19,7 @@ import (
"gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/common/utils"
bridgeAbi "scroll-tech/rollup/abi"
"scroll-tech/rollup/internal/config"
@@ -88,6 +89,11 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
return nil, fmt.Errorf("new gas oracle sender failed for address %s, err: %w", addr.Hex(), err)
}
// Ensure test features aren't enabled on the mainnet.
if commitSender.GetChainID() == big.NewInt(1) && cfg.EnableTestEnvBypassFeatures {
return nil, fmt.Errorf("cannot enable test env features in mainnet")
}
var minGasPrice uint64
var gasPriceDiff uint64
if cfg.GasOracleConfig != nil {
@@ -434,108 +440,27 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
r.metrics.rollupL2RelayerProcessCommittedBatchesTotal.Inc()
batch := batches[0]
hash := batch.Hash
status := types.ProvingStatus(batch.ProvingStatus)
switch status {
case types.ProvingTaskUnassigned, types.ProvingTaskAssigned:
// The proof for this block is not ready yet.
return
if batch.CommittedAt == nil {
log.Error("batch.CommittedAt is nil", "index", batch.Index, "hash", batch.Hash)
return
}
if r.cfg.EnableTestEnvBypassFeatures && utils.NowUTC().Sub(*batch.CommittedAt) > time.Duration(r.cfg.FinalizeBatchWithoutProofTimeoutSec)*time.Second {
if err := r.finalizeBatch(batch, false); err != nil {
log.Error("Failed to finalize timeout batch without proof", "index", batch.Index, "hash", batch.Hash, "err", err)
}
}
case types.ProvingTaskVerified:
log.Info("Start to roll up zk proof", "hash", hash)
log.Info("Start to roll up zk proof", "hash", batch.Hash)
r.metrics.rollupL2RelayerProcessCommittedBatchesFinalizedTotal.Inc()
// Check batch status before send `finalizeBatchWithProof` tx.
if r.cfg.ChainMonitor.Enabled {
var batchStatus bool
batchStatus, err = r.getBatchStatusByIndex(batch.Index)
if err != nil {
r.metrics.rollupL2ChainMonitorLatestFailedCall.Inc()
log.Warn("failed to get batch status, please check chain_monitor api server", "batch_index", batch.Index, "err", err)
return
}
if !batchStatus {
r.metrics.rollupL2ChainMonitorLatestFailedBatchStatus.Inc()
log.Error("the batch status is not right, stop finalize batch and check the reason", "batch_index", batch.Index)
return
}
if err := r.finalizeBatch(batch, true); err != nil {
log.Error("Failed to finalize batch with proof", "index", batch.Index, "hash", batch.Hash, "err", err)
}
var parentBatchStateRoot string
if batch.Index > 0 {
var parentBatch *orm.Batch
parentBatch, err = r.batchOrm.GetBatchByIndex(r.ctx, batch.Index-1)
// handle unexpected db error
if err != nil {
log.Error("Failed to get batch", "index", batch.Index-1, "err", err)
return
}
parentBatchStateRoot = parentBatch.StateRoot
}
aggProof, err := r.batchOrm.GetVerifiedProofByHash(r.ctx, hash)
if err != nil {
log.Error("get verified proof by hash failed", "hash", hash, "err", err)
return
}
if err = aggProof.SanityCheck(); err != nil {
log.Error("agg_proof sanity check fails", "hash", hash, "error", err)
return
}
data, err := r.l1RollupABI.Pack(
"finalizeBatchWithProof",
batch.BatchHeader,
common.HexToHash(parentBatchStateRoot),
common.HexToHash(batch.StateRoot),
common.HexToHash(batch.WithdrawRoot),
aggProof.Proof,
)
if err != nil {
log.Error("Pack finalizeBatchWithProof failed", "err", err)
return
}
txID := hash + "-finalize"
// add suffix `-finalize` to avoid duplication with commit tx in unit tests
txHash, err := r.finalizeSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), data, 0)
finalizeTxHash := &txHash
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
// This can happen normally if we try to finalize 2 or more
// batches around the same time. The 2nd tx might fail since
// the client does not see the 1st tx's updates at this point.
// TODO: add more fine-grained error handling
log.Error(
"finalizeBatchWithProof in layer1 failed",
"index", batch.Index,
"hash", batch.Hash,
"RollupContractAddress", r.cfg.RollupContractAddress,
"err", err,
)
log.Debug(
"finalizeBatchWithProof in layer1 failed",
"index", batch.Index,
"hash", batch.Hash,
"RollupContractAddress", r.cfg.RollupContractAddress,
"calldata", common.Bytes2Hex(data),
"err", err,
)
}
return
}
log.Info("finalizeBatchWithProof in layer1", "index", batch.Index, "batch hash", batch.Hash, "tx hash", hash)
// record and sync with db, @todo handle db error
err = r.batchOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, hash, finalizeTxHash.String(), types.RollupFinalizing)
if err != nil {
log.Error("UpdateFinalizeTxHashAndRollupStatus failed",
"index", batch.Index, "batch hash", batch.Hash,
"tx hash", finalizeTxHash.String(), "err", err)
}
r.processingFinalization.Store(txID, hash)
r.metrics.rollupL2RelayerProcessCommittedBatchesFinalizedSuccessTotal.Inc()
case types.ProvingTaskFailed:
// We were unable to prove this batch. There are two possibilities:
// (a) Prover bug. In this case, we should fix and redeploy the prover.
@@ -553,13 +478,123 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
"ProvedAt", batch.ProvedAt,
"ProofTimeSec", batch.ProofTimeSec,
)
return
default:
log.Error("encounter unreachable case in ProcessCommittedBatches", "proving status", status)
}
}
// finalizeBatch submits a finalize transaction for the given batch to layer 1.
// When withProof is true it packs `finalizeBatchWithProof` with the verified
// aggregation proof; otherwise it packs `finalizeBatch` (the test-env bypass
// path). On success the batch's rollup status is moved to RollupFinalizing.
func (r *Layer2Relayer) finalizeBatch(batch *orm.Batch, withProof bool) error {
	// Check batch status before sending the finalize tx.
	if r.cfg.ChainMonitor.Enabled {
		batchStatus, err := r.getBatchStatusByIndex(batch.Index)
		if err != nil {
			r.metrics.rollupL2ChainMonitorLatestFailedCall.Inc()
			log.Warn("failed to get batch status, please check chain_monitor api server", "batch_index", batch.Index, "err", err)
			return err
		}
		if !batchStatus {
			r.metrics.rollupL2ChainMonitorLatestFailedBatchStatus.Inc()
			log.Error("the batch status is not right, stop finalize batch and check the reason", "batch_index", batch.Index)
			// Fix: previously returned `err` here, which is always nil on this
			// path, so the caller silently treated the aborted finalization as
			// success. Return an explicit error instead.
			return fmt.Errorf("invalid batch status from chain monitor, batch index: %d", batch.Index)
		}
	}

	// The parent batch's state root is the pre-state root of this batch;
	// the genesis batch (index 0) has no parent.
	var parentBatchStateRoot string
	if batch.Index > 0 {
		parentBatch, err := r.batchOrm.GetBatchByIndex(r.ctx, batch.Index-1)
		// handle unexpected db error
		if err != nil {
			log.Error("Failed to get batch", "index", batch.Index-1, "err", err)
			return err
		}
		parentBatchStateRoot = parentBatch.StateRoot
	}

	var txCalldata []byte
	if withProof {
		aggProof, err := r.batchOrm.GetVerifiedProofByHash(r.ctx, batch.Hash)
		if err != nil {
			log.Error("get verified proof by hash failed", "hash", batch.Hash, "err", err)
			return err
		}

		if err = aggProof.SanityCheck(); err != nil {
			log.Error("agg_proof sanity check fails", "hash", batch.Hash, "error", err)
			return err
		}

		txCalldata, err = r.l1RollupABI.Pack(
			"finalizeBatchWithProof",
			batch.BatchHeader,
			common.HexToHash(parentBatchStateRoot),
			common.HexToHash(batch.StateRoot),
			common.HexToHash(batch.WithdrawRoot),
			aggProof.Proof,
		)
		if err != nil {
			log.Error("Pack finalizeBatchWithProof failed", "err", err)
			return err
		}
	} else {
		var err error
		txCalldata, err = r.l1RollupABI.Pack(
			"finalizeBatch",
			batch.BatchHeader,
			common.HexToHash(parentBatchStateRoot),
			common.HexToHash(batch.StateRoot),
			common.HexToHash(batch.WithdrawRoot),
		)
		if err != nil {
			log.Error("Pack finalizeBatch failed", "err", err)
			return err
		}
	}

	// add suffix `-finalize` to avoid duplication with commit tx in unit tests
	txID := batch.Hash + "-finalize"
	txHash, err := r.finalizeSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), txCalldata, 0)
	if err != nil {
		if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
			// This can happen normally if we try to finalize 2 or more
			// batches around the same time. The 2nd tx might fail since
			// the client does not see the 1st tx's updates at this point.
			// TODO: add more fine-grained error handling
			log.Error(
				"finalizeBatch in layer1 failed",
				"with proof", withProof,
				"index", batch.Index,
				"hash", batch.Hash,
				"RollupContractAddress", r.cfg.RollupContractAddress,
				"err", err,
			)
			log.Debug(
				"finalizeBatch in layer1 failed",
				"with proof", withProof,
				"index", batch.Index,
				"hash", batch.Hash,
				"RollupContractAddress", r.cfg.RollupContractAddress,
				"calldata", common.Bytes2Hex(txCalldata),
				"err", err,
			)
		}
		return err
	}
	// Fix: log the actual finalize tx hash (this previously logged batch.Hash
	// under the "tx hash" key).
	log.Info("finalizeBatch in layer1", "with proof", withProof, "index", batch.Index, "batch hash", batch.Hash, "tx hash", txHash.String())

	// record and sync with db, @todo handle db error
	if err := r.batchOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, batch.Hash, txHash.String(), types.RollupFinalizing); err != nil {
		log.Error("UpdateFinalizeTxHashAndRollupStatus failed", "index", batch.Index, "batch hash", batch.Hash, "tx hash", txHash.String(), "err", err)
		return err
	}

	r.processingFinalization.Store(txID, batch.Hash)
	r.metrics.rollupL2RelayerProcessCommittedBatchesFinalizedSuccessTotal.Inc()
	return nil
}
// batchStatusResponse the response schema
type batchStatusResponse struct {
ErrCode int `json:"errcode"`

View File

@@ -121,6 +121,37 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
assert.Equal(t, types.RollupFinalizing, statuses[0])
}
// testL2RelayerFinalizeTimeoutBatches checks that, with the test-env bypass
// enabled and a zero timeout, a committed batch is finalized without a proof.
func testL2RelayerFinalizeTimeoutBatches(t *testing.T) {
	testDB := setupL2RelayerDB(t)
	defer database.CloseDB(testDB)

	// Turn on the bypass so ProcessCommittedBatches finalizes immediately,
	// without waiting for a verified proof.
	l2Cfg := cfg.L2Config
	l2Cfg.RelayerConfig.EnableTestEnvBypassFeatures = true
	l2Cfg.RelayerConfig.FinalizeBatchWithoutProofTimeoutSec = 0

	l2Relayer, err := NewLayer2Relayer(context.Background(), l2Cli, testDB, l2Cfg.RelayerConfig, false, nil)
	assert.NoError(t, err)

	// Insert a two-chunk batch and mark it as committed.
	meta := &types.BatchMeta{
		StartChunkIndex: 0,
		StartChunkHash:  chunkHash1.Hex(),
		EndChunkIndex:   1,
		EndChunkHash:    chunkHash2.Hex(),
	}
	batchDB := orm.NewBatch(testDB)
	insertedBatch, err := batchDB.InsertBatch(context.Background(), []*types.Chunk{chunk1, chunk2}, meta)
	assert.NoError(t, err)

	err = batchDB.UpdateRollupStatus(context.Background(), insertedBatch.Hash, types.RollupCommitted)
	assert.NoError(t, err)

	// Poll until the relayer has moved the batch to RollupFinalizing.
	finalized := utils.TryTimes(5, func() bool {
		l2Relayer.ProcessCommittedBatches()

		statuses, err := batchDB.GetRollupStatusByHashList(context.Background(), []string{insertedBatch.Hash})
		return err == nil && len(statuses) == 1 && statuses[0] == types.RollupFinalizing
	})
	assert.True(t, finalized)
}
func testL2RelayerCommitConfirm(t *testing.T) {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)

View File

@@ -99,6 +99,7 @@ func TestFunctions(t *testing.T) {
t.Run("TestCreateNewRelayer", testCreateNewRelayer)
t.Run("TestL2RelayerProcessPendingBatches", testL2RelayerProcessPendingBatches)
t.Run("TestL2RelayerProcessCommittedBatches", testL2RelayerProcessCommittedBatches)
t.Run("TestL2RelayerFinalizeTimeoutBatches", testL2RelayerFinalizeTimeoutBatches)
t.Run("TestL2RelayerCommitConfirm", testL2RelayerCommitConfirm)
t.Run("TestL2RelayerFinalizeConfirm", testL2RelayerFinalizeConfirm)
t.Run("TestL2RelayerGasOracleConfirm", testL2RelayerGasOracleConfirm)

View File

@@ -165,6 +165,11 @@ func (s *Sender) IsFull() bool {
return s.pendingTxs.Count() >= s.config.PendingLimit
}
// GetChainID returns the chain ID associated with the sender.
//
// NOTE(review): this returns the sender's internal *big.Int pointer directly.
// Callers must compare it with Cmp (not ==, which is pointer equality) and
// must not mutate the returned value.
func (s *Sender) GetChainID() *big.Int {
	return s.chainID
}
// Stop stop the sender module.
func (s *Sender) Stop() {
close(s.stopCh)

View File

@@ -9,6 +9,7 @@ import (
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
@@ -355,9 +356,9 @@ func (o *Batch) UpdateRollupStatus(ctx context.Context, hash string, status type
switch status {
case types.RollupCommitted:
updateFields["committed_at"] = time.Now()
updateFields["committed_at"] = utils.NowUTC()
case types.RollupFinalized:
updateFields["finalized_at"] = time.Now()
updateFields["finalized_at"] = utils.NowUTC()
}
db := o.db
@@ -380,7 +381,7 @@ func (o *Batch) UpdateCommitTxHashAndRollupStatus(ctx context.Context, hash stri
updateFields["commit_tx_hash"] = commitTxHash
updateFields["rollup_status"] = int(status)
if status == types.RollupCommitted {
updateFields["committed_at"] = time.Now()
updateFields["committed_at"] = utils.NowUTC()
}
db := o.db.WithContext(ctx)

View File

@@ -17,7 +17,7 @@ import (
"scroll-tech/database/migrate"
capp "scroll-tech/coordinator/cmd/app"
capp "scroll-tech/coordinator/cmd/api/app"
"scroll-tech/common/database"
"scroll-tech/common/docker"