Mirror of https://github.com/scroll-tech/scroll.git, synced 2026-01-12 15:38:18 -05:00
Compare commits
11 Commits
- 0243c86b3c
- e6db4ac3a8
- 50040a164e
- 8494ab1899
- 2102e16fdb
- c2ab4bf16d
- 1059f9d3f8
- 9e8c3432c3
- ff380141a8
- 2216ad4271
- 25d5fabac9
.github/workflows/docker.yml (vendored, 27 lines changed)
@@ -111,7 +111,7 @@ jobs:
           tags: scrolltech/bridgehistoryapi-server:${{github.ref_name}}
 #          cache-from: type=gha,scope=${{ github.workflow }}
 #          cache-to: type=gha,scope=${{ github.workflow }}
-  coordinator:
+  coordinator-api:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
@@ -127,8 +127,29 @@ jobs:
         uses: docker/build-push-action@v2
         with:
           context: .
-          file: ./build/dockerfiles/coordinator.Dockerfile
+          file: ./build/dockerfiles/coordinator-api.Dockerfile
           push: true
-          tags: scrolltech/coordinator:${{github.ref_name}}
+          tags: scrolltech/coordinator-api:${{github.ref_name}}
 #          cache-from: type=gha,scope=${{ github.workflow }}
 #          cache-to: type=gha,scope=${{ github.workflow }}
+  coordinator-cron:
+    runs-on: ubuntu-latest
+    steps:
+      - name: Checkout code
+        uses: actions/checkout@v2
+      - name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v2
+      - name: Login to Docker Hub
+        uses: docker/login-action@v2
+        with:
+          username: ${{ secrets.DOCKERHUB_USERNAME }}
+          password: ${{ secrets.DOCKERHUB_TOKEN }}
+      - name: Build and push coordinator docker
+        uses: docker/build-push-action@v2
+        with:
+          context: .
+          file: ./build/dockerfiles/coordinator-cron.Dockerfile
+          push: true
+          tags: scrolltech/coordinator-cron:${{github.ref_name}}
+#          cache-from: type=gha,scope=${{ github.workflow }}
+#          cache-to: type=gha,scope=${{ github.workflow }}
.gitmodules (vendored, 3 lines changed)
@@ -1,9 +1,6 @@
 [submodule "l2geth"]
 path = l2geth
 url = git@github.com:scroll-tech/go-ethereum.git
-[submodule "rpc-gateway"]
-path = rpc-gateway
-url = git@github.com:scroll-tech/rpc-gateway.git
 [submodule "contracts/lib/ds-test"]
 path = contracts/lib/ds-test
 url = https://github.com/dapphub/ds-test
LICENSE (2 lines changed)
@@ -1,6 +1,6 @@
 MIT License

-Copyright (c) 2022 Scroll
+Copyright (c) 2022-2023 Scroll

 Permission is hereby granted, free of charge, to any person obtaining a copy
 of this software and associated documentation files (the "Software"), to deal
@@ -36,7 +36,7 @@ COPY . .
 RUN cp -r ./common/libzkp/interface ./coordinator/internal/logic/verifier/lib
 COPY --from=zkp-builder /app/target/release/libzkp.so ./coordinator/internal/logic/verifier/lib/
 COPY --from=zkp-builder /app/target/release/libzktrie.so ./coordinator/internal/logic/verifier/lib/
-RUN cd ./coordinator && make coordinator_skip_libzkp && mv ./build/bin/coordinator /bin/coordinator && mv internal/logic/verifier/lib /bin/
+RUN cd ./coordinator && make coordinator_api_skip_libzkp && mv ./build/bin/coordinator_api /bin/coordinator_api && mv internal/logic/verifier/lib /bin/

 # Pull coordinator into a second stage deploy alpine container
 FROM ubuntu:20.04
@@ -44,7 +44,7 @@ ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/src/coordinator/internal/logic/verifier/li
 # ENV CHAIN_ID=534353
 RUN mkdir -p /src/coordinator/internal/logic/verifier/lib
 COPY --from=builder /bin/lib /src/coordinator/internal/logic/verifier/lib
-COPY --from=builder /bin/coordinator /bin/
-RUN /bin/coordinator --version
+COPY --from=builder /bin/coordinator_api /bin/
+RUN /bin/coordinator_api --version

-ENTRYPOINT ["/bin/coordinator"]
+ENTRYPOINT ["/bin/coordinator_api"]
build/dockerfiles/coordinator-cron.Dockerfile (new file, 25 lines)
@@ -0,0 +1,25 @@
# Download Go dependencies
FROM scrolltech/go-alpine-builder:1.19 as base

WORKDIR /src
COPY go.work* ./
COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./prover/go.* ./prover/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x

# Build coordinator
FROM base as builder
RUN --mount=target=. \
    --mount=type=cache,target=/root/.cache/go-build \
    cd /src/coordinator/cmd/cron/ && go build -v -p 4 -o /bin/coordinator_cron

# Pull coordinator into a second stage deploy alpine container
FROM alpine:latest
COPY --from=builder /bin/coordinator_cron /bin/

ENTRYPOINT ["coordinator_cron"]
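The builder stage above relies on BuildKit features: `--mount=target=.` bind-mounts the build context instead of copying it, and `--mount=type=cache,target=/root/.cache/go-build` persists the Go build cache across image builds. This is why the coordinator Makefile's `docker` target, later in this diff, invokes `docker build` with `DOCKER_BUILDKIT=1`.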
@@ -0,0 +1,6 @@
assets/
contracts/
docs/
l2geth/
rpc-gateway/
*target/*
common/libzkp/impl/Cargo.lock (generated, 22 lines changed)
@@ -31,7 +31,7 @@ dependencies = [
 [[package]]
 name = "aggregator"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.5#9b7e16b9412ff6b7a55fac6b9c6a943b55ef3718"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.7#2055cc0bb970aa28d597c945b6078e2469af8862"
 dependencies = [
 "ark-std",
 "env_logger 0.10.0",
@@ -333,7 +333,7 @@ checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1"
 [[package]]
 name = "bus-mapping"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.5#9b7e16b9412ff6b7a55fac6b9c6a943b55ef3718"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.7#2055cc0bb970aa28d597c945b6078e2469af8862"
 dependencies = [
 "eth-types",
 "ethers-core",
@@ -959,7 +959,7 @@ dependencies = [
 [[package]]
 name = "eth-types"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.5#9b7e16b9412ff6b7a55fac6b9c6a943b55ef3718"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.7#2055cc0bb970aa28d597c945b6078e2469af8862"
 dependencies = [
 "ethers-core",
 "ethers-signers",
@@ -1116,7 +1116,7 @@ dependencies = [
 [[package]]
 name = "external-tracer"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.5#9b7e16b9412ff6b7a55fac6b9c6a943b55ef3718"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.7#2055cc0bb970aa28d597c945b6078e2469af8862"
 dependencies = [
 "eth-types",
 "geth-utils",
@@ -1296,7 +1296,7 @@ dependencies = [
 [[package]]
 name = "gadgets"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.5#9b7e16b9412ff6b7a55fac6b9c6a943b55ef3718"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.7#2055cc0bb970aa28d597c945b6078e2469af8862"
 dependencies = [
 "digest 0.7.6",
 "eth-types",
@@ -1328,7 +1328,7 @@ dependencies = [
 [[package]]
 name = "geth-utils"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.5#9b7e16b9412ff6b7a55fac6b9c6a943b55ef3718"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.7#2055cc0bb970aa28d597c945b6078e2469af8862"
 dependencies = [
 "env_logger 0.9.3",
 "gobuild 0.1.0-alpha.2 (git+https://github.com/scroll-tech/gobuild.git)",
@@ -1937,7 +1937,7 @@ dependencies = [
 [[package]]
 name = "keccak256"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.5#9b7e16b9412ff6b7a55fac6b9c6a943b55ef3718"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.7#2055cc0bb970aa28d597c945b6078e2469af8862"
 dependencies = [
 "env_logger 0.9.3",
 "eth-types",
@@ -2135,7 +2135,7 @@ dependencies = [
 [[package]]
 name = "mock"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.5#9b7e16b9412ff6b7a55fac6b9c6a943b55ef3718"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.7#2055cc0bb970aa28d597c945b6078e2469af8862"
 dependencies = [
 "eth-types",
 "ethers-core",
@@ -2151,7 +2151,7 @@ dependencies = [
 [[package]]
 name = "mpt-zktrie"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.5#9b7e16b9412ff6b7a55fac6b9c6a943b55ef3718"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.7#2055cc0bb970aa28d597c945b6078e2469af8862"
 dependencies = [
 "eth-types",
 "halo2-mpt-circuits",
@@ -2582,7 +2582,7 @@ dependencies = [
 [[package]]
 name = "prover"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.5#9b7e16b9412ff6b7a55fac6b9c6a943b55ef3718"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.7#2055cc0bb970aa28d597c945b6078e2469af8862"
 dependencies = [
 "aggregator",
 "anyhow",
@@ -4125,7 +4125,7 @@ checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9"
 [[package]]
 name = "zkevm-circuits"
 version = "0.1.0"
-source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.5#9b7e16b9412ff6b7a55fac6b9c6a943b55ef3718"
+source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.9.7#2055cc0bb970aa28d597c945b6078e2469af8862"
 dependencies = [
 "array-init",
 "bus-mapping",
@@ -21,7 +21,7 @@ halo2curves = { git = "https://github.com/scroll-tech/halo2curves.git", branch =

 [dependencies]
 halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "develop" }
-prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.9.5", default-features = false, features = ["parallel_syn", "scroll", "shanghai"] }
+prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.9.7", default-features = false, features = ["parallel_syn", "scroll", "shanghai"] }

 base64 = "0.13.0"
 env_logger = "0.9.0"
@@ -11,6 +11,19 @@ use std::{
 pub(crate) static OUTPUT_DIR: Lazy<Option<String>> =
     Lazy::new(|| env::var("PROVER_OUTPUT_DIR").ok());

+/// # Safety
+#[no_mangle]
+pub extern "C" fn free_c_chars(ptr: *mut c_char) {
+    if ptr.is_null() {
+        log::warn!("Try to free an empty pointer!");
+        return;
+    }
+
+    unsafe {
+        let _ = CString::from_raw(ptr);
+    }
+}
+
 pub(crate) fn c_char_to_str(c: *const c_char) -> &'static str {
     let cstr = unsafe { CStr::from_ptr(c) };
     cstr.to_str().unwrap()
@@ -12,3 +12,4 @@ char* gen_chunk_proof(char* block_traces);
 char verify_chunk_proof(char* proof);

 char* block_traces_to_chunk_info(char* block_traces);
+void free_c_chars(char* ptr);
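The exported `free_c_chars` above exists because strings the Rust library hands out (via `CString::into_raw`) must be released by Rust's allocator, not by libc's `free`. A minimal cgo sketch of the ownership convention — the header declarations are paraphrased from the `libzkp.h` hunk above, and the function body mirrors the `tracesToChunkInfo` change that appears later in this diff:

```go
package prover

/*
#include <stdlib.h>
// Paraphrased from the libzkp.h interface shown above (assumed linkage):
char* block_traces_to_chunk_info(char* block_traces);
void free_c_chars(char* ptr);
*/
import "C"

import "unsafe"

func tracesToChunkInfo(tracesByt []byte) []byte {
	// Allocated by Go's C.CString, so it is released with libc free.
	tracesStr := C.CString(string(tracesByt))
	defer C.free(unsafe.Pointer(tracesStr))

	// Allocated inside the Rust library, so it must go back through
	// free_c_chars (CString::from_raw) rather than C.free.
	cChunkInfo := C.block_traces_to_chunk_info(tracesStr)
	defer C.free_c_chars(cChunkInfo)

	return []byte(C.GoString(cChunkInfo))
}
```

Freeing a Rust-allocated string with libc's `free` is undefined behavior whenever the two sides use different allocators, which is what the `C.free` → `C.free_c_chars` swaps in the Go prover below are correcting.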
@@ -24,8 +24,10 @@ var (
 // DBCliApp the name of mock database app.
 DBCliApp MockAppName = "db_cli-test"

-// CoordinatorApp the name of mock coordinator app.
-CoordinatorApp MockAppName = "coordinator-test"
+// CoordinatorAPIApp the name of mock coordinator app.
+CoordinatorAPIApp MockAppName = "coordinator-api-test"
+// CoordinatorCronApp the name of mock coordinator cron app.
+CoordinatorCronApp MockAppName = "coordinator-cron-test"

 // ChunkProverApp the name of mock chunk prover app.
 ChunkProverApp MockAppName = "chunkProver-test"
@@ -5,7 +5,7 @@ import (
 "runtime/debug"
 )

-var tag = "v4.3.33"
+var tag = "v4.3.39"

 var commit = func() string {
 if info, ok := debug.ReadBuildInfo(); ok {
@@ -51,14 +51,16 @@ contract InitializeL1ScrollOwner is Script {
 address L1_SCROLL_MESSENGER_PROXY_ADDR = vm.envAddress("L1_SCROLL_MESSENGER_PROXY_ADDR");
 address L1_GATEWAY_ROUTER_PROXY_ADDR = vm.envAddress("L1_GATEWAY_ROUTER_PROXY_ADDR");
 address L1_CUSTOM_ERC20_GATEWAY_PROXY_ADDR = vm.envAddress("L1_CUSTOM_ERC20_GATEWAY_PROXY_ADDR");
 address L1_DAI_GATEWAY_PROXY_ADDR = vm.envAddress("L1_DAI_GATEWAY_PROXY_ADDR");
 address L1_LIDO_GATEWAY_PROXY_ADDR = vm.envAddress("L1_LIDO_GATEWAY_PROXY_ADDR");
 address L1_ETH_GATEWAY_PROXY_ADDR = vm.envAddress("L1_ETH_GATEWAY_PROXY_ADDR");
 address L1_STANDARD_ERC20_GATEWAY_PROXY_ADDR = vm.envAddress("L1_STANDARD_ERC20_GATEWAY_PROXY_ADDR");
-// address L1_USDC_GATEWAY_PROXY_ADDR = vm.envAddress("L1_USDC_GATEWAY_PROXY_ADDR");
+address L1_USDC_GATEWAY_PROXY_ADDR = vm.envAddress("L1_USDC_GATEWAY_PROXY_ADDR");
 address L1_WETH_GATEWAY_PROXY_ADDR = vm.envAddress("L1_WETH_GATEWAY_PROXY_ADDR");
 address L1_ERC721_GATEWAY_PROXY_ADDR = vm.envAddress("L1_ERC721_GATEWAY_PROXY_ADDR");
 address L1_ERC1155_GATEWAY_PROXY_ADDR = vm.envAddress("L1_ERC1155_GATEWAY_PROXY_ADDR");
 address L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR = vm.envAddress("L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR");
-// address L1_ENFORCED_TX_GATEWAY_PROXY_ADDR = vm.envAddress("L1_ENFORCED_TX_GATEWAY_PROXY_ADDR");
+address L1_ENFORCED_TX_GATEWAY_PROXY_ADDR = vm.envAddress("L1_ENFORCED_TX_GATEWAY_PROXY_ADDR");
 address L1_WHITELIST_ADDR = vm.envAddress("L1_WHITELIST_ADDR");

 ScrollOwner owner;
@@ -81,9 +83,8 @@ contract InitializeL1ScrollOwner is Script {
 configL1ERC721Gateway();
 configL1ERC1155Gateway();

-// @note comments out for testnet
-// configEnforcedTxGateway();
-// configL1USDCGateway();
+configL1USDCGateway();
+configEnforcedTxGateway();

 grantRoles();
 transferOwnership();
@@ -96,15 +97,17 @@ contract InitializeL1ScrollOwner is Script {
 Ownable(L1_SCROLL_CHAIN_PROXY_ADDR).transferOwnership(address(owner));
 Ownable(L1_MESSAGE_QUEUE_PROXY_ADDR).transferOwnership(address(owner));
 Ownable(L1_SCROLL_MESSENGER_PROXY_ADDR).transferOwnership(address(owner));
-// Ownable(L1_ENFORCED_TX_GATEWAY_PROXY_ADDR).transferOwnership(address(owner));
+Ownable(L1_ENFORCED_TX_GATEWAY_PROXY_ADDR).transferOwnership(address(owner));
 Ownable(L2_GAS_PRICE_ORACLE_PROXY_ADDR).transferOwnership(address(owner));
 Ownable(L1_WHITELIST_ADDR).transferOwnership(address(owner));
 Ownable(L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR).transferOwnership(address(owner));
 Ownable(L1_GATEWAY_ROUTER_PROXY_ADDR).transferOwnership(address(owner));
 Ownable(L1_CUSTOM_ERC20_GATEWAY_PROXY_ADDR).transferOwnership(address(owner));
 Ownable(L1_DAI_GATEWAY_PROXY_ADDR).transferOwnership(address(owner));
 Ownable(L1_LIDO_GATEWAY_PROXY_ADDR).transferOwnership(address(owner));
 Ownable(L1_ETH_GATEWAY_PROXY_ADDR).transferOwnership(address(owner));
 Ownable(L1_STANDARD_ERC20_GATEWAY_PROXY_ADDR).transferOwnership(address(owner));
-// Ownable(L1_USDC_GATEWAY_PROXY_ADDR).transferOwnership(address(owner));
+Ownable(L1_USDC_GATEWAY_PROXY_ADDR).transferOwnership(address(owner));
 Ownable(L1_WETH_GATEWAY_PROXY_ADDR).transferOwnership(address(owner));
 Ownable(L1_ERC721_GATEWAY_PROXY_ADDR).transferOwnership(address(owner));
 Ownable(L1_ERC1155_GATEWAY_PROXY_ADDR).transferOwnership(address(owner));
@@ -249,7 +252,6 @@ contract InitializeL1ScrollOwner is Script {
 owner.updateAccess(L1_ERC1155_GATEWAY_PROXY_ADDR, _selectors, TIMELOCK_1DAY_DELAY_ROLE, true);
 }

-/*
 function configL1USDCGateway() internal {
 bytes4[] memory _selectors;

@@ -270,5 +272,4 @@ contract InitializeL1ScrollOwner is Script {
 owner.updateAccess(L1_ENFORCED_TX_GATEWAY_PROXY_ADDR, _selectors, SCROLL_MULTISIG_NO_DELAY_ROLE, true);
 owner.updateAccess(L1_ENFORCED_TX_GATEWAY_PROXY_ADDR, _selectors, EMERGENCY_MULTISIG_NO_DELAY_ROLE, true);
 }
-*/
 }
@@ -52,13 +52,18 @@ contract InitializeL2ScrollOwner is Script {
 address L2_SCROLL_MESSENGER_PROXY_ADDR = vm.envAddress("L2_SCROLL_MESSENGER_PROXY_ADDR");
 address L2_GATEWAY_ROUTER_PROXY_ADDR = vm.envAddress("L2_GATEWAY_ROUTER_PROXY_ADDR");
 address L2_CUSTOM_ERC20_GATEWAY_PROXY_ADDR = vm.envAddress("L2_CUSTOM_ERC20_GATEWAY_PROXY_ADDR");
 address L2_DAI_GATEWAY_PROXY_ADDR = vm.envAddress("L2_DAI_GATEWAY_PROXY_ADDR");
 address L2_LIDO_GATEWAY_PROXY_ADDR = vm.envAddress("L2_LIDO_GATEWAY_PROXY_ADDR");
 address L2_ETH_GATEWAY_PROXY_ADDR = vm.envAddress("L2_ETH_GATEWAY_PROXY_ADDR");
 address L2_STANDARD_ERC20_GATEWAY_PROXY_ADDR = vm.envAddress("L2_STANDARD_ERC20_GATEWAY_PROXY_ADDR");
-// address L2_USDC_GATEWAY_PROXY_ADDR = vm.envAddress("L2_USDC_GATEWAY_PROXY_ADDR");
+address L2_USDC_GATEWAY_PROXY_ADDR = vm.envAddress("L2_USDC_GATEWAY_PROXY_ADDR");
 address L2_WETH_GATEWAY_PROXY_ADDR = vm.envAddress("L2_WETH_GATEWAY_PROXY_ADDR");
 address L2_ERC721_GATEWAY_PROXY_ADDR = vm.envAddress("L2_ERC721_GATEWAY_PROXY_ADDR");
 address L2_ERC1155_GATEWAY_PROXY_ADDR = vm.envAddress("L2_ERC1155_GATEWAY_PROXY_ADDR");

+address L2_USDC_PROXY_ADDR = vm.envAddress("L2_USDC_PROXY_ADDR");
+address L2_USDC_MASTER_MINTER_ADDR = vm.envAddress("L2_USDC_MASTER_MINTER_ADDR");

 ScrollOwner owner;

 function run() external {
@@ -77,8 +82,7 @@ contract InitializeL2ScrollOwner is Script {
 configL2ERC721Gateway();
 configL2ERC1155Gateway();

-// @note comments out for testnet
-// configL2USDCGateway();
+configL2USDCGateway();

 grantRoles();
 transferOwnership();
@@ -95,13 +99,17 @@ contract InitializeL2ScrollOwner is Script {
 Ownable(L2_SCROLL_MESSENGER_PROXY_ADDR).transferOwnership(address(owner));
 Ownable(L2_GATEWAY_ROUTER_PROXY_ADDR).transferOwnership(address(owner));
 Ownable(L2_CUSTOM_ERC20_GATEWAY_PROXY_ADDR).transferOwnership(address(owner));
 Ownable(L2_DAI_GATEWAY_PROXY_ADDR).transferOwnership(address(owner));
 Ownable(L2_LIDO_GATEWAY_PROXY_ADDR).transferOwnership(address(owner));
 Ownable(L2_ETH_GATEWAY_PROXY_ADDR).transferOwnership(address(owner));
 Ownable(L2_STANDARD_ERC20_GATEWAY_PROXY_ADDR).transferOwnership(address(owner));
 Ownable(L2_WETH_GATEWAY_PROXY_ADDR).transferOwnership(address(owner));
 Ownable(L2_ERC721_GATEWAY_PROXY_ADDR).transferOwnership(address(owner));
 Ownable(L2_ERC1155_GATEWAY_PROXY_ADDR).transferOwnership(address(owner));

-// Ownable(L2_USDC_GATEWAY_PROXY_ADDR).transferOwnership(address(owner));
+Ownable(L2_USDC_GATEWAY_PROXY_ADDR).transferOwnership(address(owner));
+Ownable(L2_USDC_PROXY_ADDR).transferOwnership(address(owner));
+Ownable(L2_USDC_MASTER_MINTER_ADDR).transferOwnership(address(owner));
 }

 function grantRoles() internal {
@@ -201,7 +209,6 @@ contract InitializeL2ScrollOwner is Script {
 owner.updateAccess(L2_ERC1155_GATEWAY_PROXY_ADDR, _selectors, TIMELOCK_1DAY_DELAY_ROLE, true);
 }

-/*
 function configL2USDCGateway() internal {
 bytes4[] memory _selectors;

@@ -211,6 +218,11 @@ contract InitializeL2ScrollOwner is Script {
 _selectors[1] = L2USDCGateway.pauseDeposit.selector;
 _selectors[2] = L2USDCGateway.pauseWithdraw.selector;
 owner.updateAccess(L2_USDC_GATEWAY_PROXY_ADDR, _selectors, TIMELOCK_7DAY_DELAY_ROLE, true);

+// delay 7 day, scroll multisig
+_selectors = new bytes4[](1);
+_selectors[0] = Ownable.transferOwnership.selector;
+owner.updateAccess(L2_USDC_PROXY_ADDR, _selectors, TIMELOCK_7DAY_DELAY_ROLE, true);
+owner.updateAccess(L2_USDC_MASTER_MINTER_ADDR, _selectors, TIMELOCK_7DAY_DELAY_ROLE, true);
 }
-*/
 }
contracts/src/L1/ecc.sol (new file, 134 lines)
@@ -0,0 +1,134 @@
// SPDX-License-Identifier: GPL-3.0

pragma solidity >=0.7.0 <0.9.0;

contract Ecc {
    /* ECC Functions */
    // https://etherscan.io/address/0x41bf00f080ed41fa86201eac56b8afb170d9e36d#code
    function ecAdd(uint256[2] memory p0, uint256[2] memory p1) public view
        returns (uint256[2] memory retP)
    {
        uint256[4] memory i = [p0[0], p0[1], p1[0], p1[1]];

        assembly {
            // call ecadd precompile
            // inputs are: x1, y1, x2, y2
            if iszero(staticcall(not(0), 0x06, i, 0x80, retP, 0x40)) {
                revert(0, 0)
            }
        }
    }

    // https://etherscan.io/address/0x41bf00f080ed41fa86201eac56b8afb170d9e36d#code
    function ecMul(uint256[2] memory p, uint256 s) public view
        returns (uint256[2] memory retP)
    {
        // With a public key (x, y), this computes p = scalar * (x, y).
        uint256[3] memory i = [p[0], p[1], s];

        assembly {
            // call ecmul precompile
            // inputs are: x, y, scalar
            if iszero(staticcall(not(0), 0x07, i, 0x60, retP, 0x40)) {
                revert(0, 0)
            }
        }
    }

    // scroll-tech/scroll/contracts/src/libraries/verifier/RollupVerifier.sol
    struct G1Point {
        uint256 x;
        uint256 y;
    }
    struct G2Point {
        uint256[2] x;
        uint256[2] y;
    }
    function ecPairing(G1Point[] memory p1, G2Point[] memory p2) internal view returns (bool) {
        uint256 length = p1.length * 6;
        uint256[] memory input = new uint256[](length);
        uint256[1] memory result;
        bool ret;

        require(p1.length == p2.length);

        for (uint256 i = 0; i < p1.length; i++) {
            input[0 + i * 6] = p1[i].x;
            input[1 + i * 6] = p1[i].y;
            input[2 + i * 6] = p2[i].x[0];
            input[3 + i * 6] = p2[i].x[1];
            input[4 + i * 6] = p2[i].y[0];
            input[5 + i * 6] = p2[i].y[1];
        }

        assembly {
            ret := staticcall(gas(), 8, add(input, 0x20), mul(length, 0x20), result, 0x20)
        }
        require(ret);
        return result[0] != 0;
    }

    /* Bench */
    function ecAdds(uint256 n) public
    {
        uint256[2] memory p0;
        p0[0] = 1;
        p0[1] = 2;
        uint256[2] memory p1;
        p1[0] = 1;
        p1[1] = 2;

        for (uint i = 0; i < n; i++) {
            ecAdd(p0, p1);
        }
    }

    function ecMuls(uint256 n) public
    {
        uint256[2] memory p0;
        p0[0] = 1;
        p0[1] = 2;

        for (uint i = 0; i < n; i++) {
            ecMul(p0, 3);
        }
    }

    function ecPairings(uint256 n) public
    {
        G1Point[] memory g1_points = new G1Point[](2);
        G2Point[] memory g2_points = new G2Point[](2);
        g1_points[0].x = 0x0000000000000000000000000000000000000000000000000000000000000001;
        g1_points[0].y = 0x0000000000000000000000000000000000000000000000000000000000000002;
        g2_points[0].x[1] = 0x1800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed;
        g2_points[0].x[0] = 0x198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c2;
        g2_points[0].y[1] = 0x12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa;
        g2_points[0].y[0] = 0x090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b;
        g1_points[1].x = 0x1aa125a22bd902874034e67868aed40267e5575d5919677987e3bc6dd42a32fe;
        g1_points[1].y = 0x1bacc186725464068956d9a191455c2d6f6db282d83645c610510d8d4efbaee0;
        g2_points[1].x[1] = 0x1b7734c80605f71f1e2de61e998ce5854ff2abebb76537c3d67e50d71422a852;
        g2_points[1].x[0] = 0x10d5a1e34b2388a5ebe266033a5e0e63c89084203784da0c6bd9b052a78a2cac;
        g2_points[1].y[1] = 0x275739c5c2cdbc72e37c689e2ab441ea76c1d284b9c46ae8f5c42ead937819e1;
        g2_points[1].y[0] = 0x018de34c5b7c3d3d75428bbe050f1449ea3d9961d563291f307a1874f7332e65;

        for (uint i = 0; i < n; i++) {
            ecPairing(g1_points, g2_points);
            // bool checked = false;
            // checked = ecPairing(g1_points, g2_points);
            // require(checked);
        }
    }

    // https://github.com/OpenZeppelin/openzeppelin-contracts/blob/8a0b7bed82d6b8053872c3fd40703efd58f5699d/test/utils/cryptography/ECDSA.test.js#L230
    function ecRecovers(uint256 n) public
    {
        bytes32 hash = 0xb94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9;
        bytes32 r = 0xe742ff452d41413616a5bf43fe15dd88294e983d3d36206c2712f39083d638bd;
        uint8 v = 0x1b;
        bytes32 s = 0xe0a0fc89be718fbc1033e1d30d78be1c68081562ed2e97af876f286f3453231d;

        for (uint i = 0; i < n; i++) {
            ecrecover(hash, v, r, s);
        }
    }
}
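For reference, `ecPairing` above packs each (G1, G2) pair into six 32-byte words and hands the whole array to the bn254 pairing-check precompile at address 0x08 (EIP-197), which returns 1 exactly when the product of the pairings is the identity in the target group:

$$\prod_{i=1}^{k} e(P_i, Q_i) = 1_{G_T}$$

Checking this product against the identity is the standard way to verify a relation such as $e(P_1, Q_1) = e(-P_2, Q_2)$, as the two hard-coded pairs in `ecPairings` do, without ever computing an individual pairing value.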
@@ -360,6 +360,10 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
 /// @notice Add an account to the sequencer list.
 /// @param _account The address of account to add.
 function addSequencer(address _account) external onlyOwner {
+// @note Currently many external services rely on EOA sequencer to decode metadata directly from tx.calldata.
+// So we explicitly make sure the account is EOA.
+require(_account.code.length == 0, "not EOA");
+
 isSequencer[_account] = true;

 emit UpdateSequencer(_account, true);
@@ -376,6 +380,9 @@ contract ScrollChain is OwnableUpgradeable, PausableUpgradeable, IScrollChain {
 /// @notice Add an account to the prover list.
 /// @param _account The address of account to add.
 function addProver(address _account) external onlyOwner {
+// @note Currently many external services rely on EOA prover to decode metadata directly from tx.calldata.
+// So we explicitly make sure the account is EOA.
+require(_account.code.length == 0, "not EOA");
 isProver[_account] = true;

 emit UpdateProver(_account, true);
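One caveat worth noting about this style of check (an observation, not part of the change itself): `code.length` is also zero for a contract address while its constructor is still executing, so `require(_account.code.length == 0, "not EOA")` rejects already-deployed contracts but not calls registered from within a constructor. Since both functions are `onlyOwner` and the goal is merely to keep sequencer and prover accounts decodable as EOAs by external services, that limitation is acceptable here.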
@@ -106,8 +106,8 @@ abstract contract L1GatewayTestBase is DSTestPlus {
 }

 function prepareL2MessageRoot(bytes32 messageHash) internal {
-rollup.addSequencer(address(this));
-rollup.addProver(address(this));
+rollup.addSequencer(address(0));
+rollup.addProver(address(0));

 // import genesis batch
 bytes memory batchHeader0 = new bytes(89);
@@ -122,7 +122,9 @@ abstract contract L1GatewayTestBase is DSTestPlus {
 bytes memory chunk0 = new bytes(1 + 60);
 chunk0[0] = bytes1(uint8(1)); // one block in this chunk
 chunks[0] = chunk0;
+hevm.startPrank(address(0));
 rollup.commitBatch(0, batchHeader0, chunks, new bytes(0));
+hevm.stopPrank();

 bytes memory batchHeader1 = new bytes(89);
 assembly {
@@ -134,6 +136,7 @@ abstract contract L1GatewayTestBase is DSTestPlus {
 mstore(add(batchHeader1, add(0x20, 57)), batchHash0) // parentBatchHash
 }

+hevm.startPrank(address(0));
 rollup.finalizeBatchWithProof(
 batchHeader1,
 bytes32(uint256(1)),
@@ -141,5 +144,6 @@ abstract contract L1GatewayTestBase is DSTestPlus {
 messageHash,
 new bytes(0)
 );
+hevm.stopPrank();
 }
 }
@@ -67,30 +67,40 @@ contract ScrollChainTest is DSTestPlus {
 hevm.expectRevert("caller not sequencer");
 rollup.commitBatch(0, batchHeader0, new bytes[](0), new bytes(0));

-rollup.addSequencer(address(this));
+rollup.addSequencer(address(0));

 // invalid version, revert
+hevm.startPrank(address(0));
 hevm.expectRevert("invalid version");
 rollup.commitBatch(1, batchHeader0, new bytes[](0), new bytes(0));
+hevm.stopPrank();

 // batch is empty, revert
+hevm.startPrank(address(0));
 hevm.expectRevert("batch is empty");
 rollup.commitBatch(0, batchHeader0, new bytes[](0), new bytes(0));
+hevm.stopPrank();

 // batch header length too small, revert
+hevm.startPrank(address(0));
 hevm.expectRevert("batch header length too small");
 rollup.commitBatch(0, new bytes(88), new bytes[](1), new bytes(0));
+hevm.stopPrank();

 // wrong bitmap length, revert
+hevm.startPrank(address(0));
 hevm.expectRevert("wrong bitmap length");
 rollup.commitBatch(0, new bytes(90), new bytes[](1), new bytes(0));
+hevm.stopPrank();

 // incorrect parent batch hash, revert
 assembly {
 mstore(add(batchHeader0, add(0x20, 25)), 2) // change data hash for batch0
 }
+hevm.startPrank(address(0));
 hevm.expectRevert("incorrect parent batch hash");
 rollup.commitBatch(0, batchHeader0, new bytes[](1), new bytes(0));
+hevm.stopPrank();
 assembly {
 mstore(add(batchHeader0, add(0x20, 25)), 1) // change back
 }
@@ -101,15 +111,19 @@ contract ScrollChainTest is DSTestPlus {
 // no block in chunk, revert
 chunk0 = new bytes(1);
 chunks[0] = chunk0;
+hevm.startPrank(address(0));
 hevm.expectRevert("no block in chunk");
 rollup.commitBatch(0, batchHeader0, chunks, new bytes(0));
+hevm.stopPrank();

 // invalid chunk length, revert
 chunk0 = new bytes(1);
 chunk0[0] = bytes1(uint8(1)); // one block in this chunk
 chunks[0] = chunk0;
+hevm.startPrank(address(0));
 hevm.expectRevert("invalid chunk length");
 rollup.commitBatch(0, batchHeader0, chunks, new bytes(0));
+hevm.stopPrank();

 // cannot skip last L1 message, revert
 chunk0 = new bytes(1 + 60);
@@ -119,8 +133,10 @@ contract ScrollChainTest is DSTestPlus {
 chunk0[60] = bytes1(uint8(1)); // numL1Messages = 1
 bitmap[31] = bytes1(uint8(1));
 chunks[0] = chunk0;
+hevm.startPrank(address(0));
 hevm.expectRevert("cannot skip last L1 message");
 rollup.commitBatch(0, batchHeader0, chunks, bitmap);
+hevm.stopPrank();

 // num txs less than num L1 msgs, revert
 chunk0 = new bytes(1 + 60);
@@ -130,26 +146,34 @@ contract ScrollChainTest is DSTestPlus {
 chunk0[60] = bytes1(uint8(3)); // numL1Messages = 3
 bitmap[31] = bytes1(uint8(3));
 chunks[0] = chunk0;
+hevm.startPrank(address(0));
 hevm.expectRevert("num txs less than num L1 msgs");
 rollup.commitBatch(0, batchHeader0, chunks, bitmap);
+hevm.stopPrank();

 // incomplete l2 transaction data, revert
 chunk0 = new bytes(1 + 60 + 1);
 chunk0[0] = bytes1(uint8(1)); // one block in this chunk
 chunks[0] = chunk0;
+hevm.startPrank(address(0));
 hevm.expectRevert("incomplete l2 transaction data");
 rollup.commitBatch(0, batchHeader0, chunks, new bytes(0));
+hevm.stopPrank();

 // commit batch with one chunk, no tx, correctly
 chunk0 = new bytes(1 + 60);
 chunk0[0] = bytes1(uint8(1)); // one block in this chunk
 chunks[0] = chunk0;
+hevm.startPrank(address(0));
 rollup.commitBatch(0, batchHeader0, chunks, new bytes(0));
+hevm.stopPrank();
 assertGt(uint256(rollup.committedBatches(1)), 0);

 // batch is already committed, revert
+hevm.startPrank(address(0));
 hevm.expectRevert("batch already committed");
 rollup.commitBatch(0, batchHeader0, chunks, new bytes(0));
+hevm.stopPrank();
 }

 function testFinalizeBatchWithProof() public {
@@ -157,8 +181,8 @@ contract ScrollChainTest is DSTestPlus {
 hevm.expectRevert("caller not prover");
 rollup.finalizeBatchWithProof(new bytes(0), bytes32(0), bytes32(0), bytes32(0), new bytes(0));

-rollup.addProver(address(this));
-rollup.addSequencer(address(this));
+rollup.addProver(address(0));
+rollup.addSequencer(address(0));

 bytes memory batchHeader0 = new bytes(89);

@@ -176,7 +200,9 @@ contract ScrollChainTest is DSTestPlus {
 chunk0 = new bytes(1 + 60);
 chunk0[0] = bytes1(uint8(1)); // one block in this chunk
 chunks[0] = chunk0;
+hevm.startPrank(address(0));
 rollup.commitBatch(0, batchHeader0, chunks, new bytes(0));
+hevm.stopPrank();
 assertGt(uint256(rollup.committedBatches(1)), 0);

 bytes memory batchHeader1 = new bytes(89);
@@ -190,12 +216,15 @@ contract ScrollChainTest is DSTestPlus {
 }

 // incorrect batch hash, revert
-hevm.expectRevert("incorrect batch hash");
 batchHeader1[0] = bytes1(uint8(1)); // change version to 1
+hevm.startPrank(address(0));
+hevm.expectRevert("incorrect batch hash");
 rollup.finalizeBatchWithProof(batchHeader1, bytes32(uint256(1)), bytes32(uint256(2)), bytes32(0), new bytes(0));
+hevm.stopPrank();
 batchHeader1[0] = bytes1(uint8(0)); // change back

 // batch header length too small, revert
+hevm.startPrank(address(0));
 hevm.expectRevert("batch header length too small");
 rollup.finalizeBatchWithProof(
 new bytes(88),
@@ -204,8 +233,10 @@ contract ScrollChainTest is DSTestPlus {
 bytes32(0),
 new bytes(0)
 );
+hevm.stopPrank();

 // wrong bitmap length, revert
+hevm.startPrank(address(0));
 hevm.expectRevert("wrong bitmap length");
 rollup.finalizeBatchWithProof(
 new bytes(90),
@@ -214,13 +245,17 @@ contract ScrollChainTest is DSTestPlus {
 bytes32(0),
 new bytes(0)
 );
+hevm.stopPrank();

 // incorrect previous state root, revert
+hevm.startPrank(address(0));
 hevm.expectRevert("incorrect previous state root");
 rollup.finalizeBatchWithProof(batchHeader1, bytes32(uint256(2)), bytes32(uint256(2)), bytes32(0), new bytes(0));
+hevm.stopPrank();

 // verify success
 assertBoolEq(rollup.isBatchFinalized(1), false);
+hevm.startPrank(address(0));
 rollup.finalizeBatchWithProof(
 batchHeader1,
 bytes32(uint256(1)),
@@ -228,12 +263,14 @@ contract ScrollChainTest is DSTestPlus {
 bytes32(uint256(3)),
 new bytes(0)
 );
+hevm.stopPrank();
 assertBoolEq(rollup.isBatchFinalized(1), true);
 assertEq(rollup.finalizedStateRoots(1), bytes32(uint256(2)));
 assertEq(rollup.withdrawRoots(1), bytes32(uint256(3)));
 assertEq(rollup.lastFinalizedBatchIndex(), 1);

 // batch already verified, revert
+hevm.startPrank(address(0));
 hevm.expectRevert("batch already verified");
 rollup.finalizeBatchWithProof(
 batchHeader1,
@@ -242,11 +279,12 @@ contract ScrollChainTest is DSTestPlus {
 bytes32(uint256(3)),
 new bytes(0)
 );
+hevm.stopPrank();
 }

 function testCommitAndFinalizeWithL1Messages() public {
-rollup.addSequencer(address(this));
-rollup.addProver(address(this));
+rollup.addSequencer(address(0));
+rollup.addProver(address(0));

 // import 300 L1 messages
 for (uint256 i = 0; i < 300; i++) {
@@ -307,14 +345,17 @@ contract ScrollChainTest is DSTestPlus {
 chunks = new bytes[](1);
 chunks[0] = chunk0;
 bitmap = new bytes(32);
+hevm.startPrank(address(0));
 hevm.expectEmit(true, true, false, true);
 emit CommitBatch(1, bytes32(0x00847173b29b238cf319cde79512b7c213e5a8b4138daa7051914c4592b6dfc7));
 rollup.commitBatch(0, batchHeader0, chunks, bitmap);
+hevm.stopPrank();
 assertBoolEq(rollup.isBatchFinalized(1), false);
 bytes32 batchHash1 = rollup.committedBatches(1);
 assertEq(batchHash1, bytes32(0x00847173b29b238cf319cde79512b7c213e5a8b4138daa7051914c4592b6dfc7));

 // finalize batch1
+hevm.startPrank(address(0));
 hevm.expectEmit(true, true, false, true);
 emit FinalizeBatch(1, batchHash1, bytes32(uint256(2)), bytes32(uint256(3)));
 rollup.finalizeBatchWithProof(
@@ -324,6 +365,7 @@ contract ScrollChainTest is DSTestPlus {
 bytes32(uint256(3)),
 new bytes(0)
 );
+hevm.stopPrank();
 assertBoolEq(rollup.isBatchFinalized(1), true);
 assertEq(rollup.finalizedStateRoots(1), bytes32(uint256(2)));
 assertEq(rollup.withdrawRoots(1), bytes32(uint256(3)));
@@ -431,21 +473,28 @@ contract ScrollChainTest is DSTestPlus {

 // too many txs in one chunk, revert
 rollup.updateMaxNumTxInChunk(2); // 3 - 1
+hevm.startPrank(address(0));
 hevm.expectRevert("too many txs in one chunk");
 rollup.commitBatch(0, batchHeader1, chunks, bitmap); // first chunk with too many txs
+hevm.stopPrank();
 rollup.updateMaxNumTxInChunk(185); // 5+10+300 - 2 - 127
+hevm.startPrank(address(0));
 hevm.expectRevert("too many txs in one chunk");
 rollup.commitBatch(0, batchHeader1, chunks, bitmap); // second chunk with too many txs
+hevm.stopPrank();

 rollup.updateMaxNumTxInChunk(186);
+hevm.startPrank(address(0));
 hevm.expectEmit(true, true, false, true);
 emit CommitBatch(2, bytes32(0x03a9cdcb9d582251acf60937db006ec99f3505fd4751b7c1f92c9a8ef413e873));
 rollup.commitBatch(0, batchHeader1, chunks, bitmap);
+hevm.stopPrank();
 assertBoolEq(rollup.isBatchFinalized(2), false);
 bytes32 batchHash2 = rollup.committedBatches(2);
 assertEq(batchHash2, bytes32(0x03a9cdcb9d582251acf60937db006ec99f3505fd4751b7c1f92c9a8ef413e873));

 // verify committed batch correctly
+hevm.startPrank(address(0));
 hevm.expectEmit(true, true, false, true);
 emit FinalizeBatch(2, batchHash2, bytes32(uint256(4)), bytes32(uint256(5)));
 rollup.finalizeBatchWithProof(
@@ -455,6 +504,7 @@ contract ScrollChainTest is DSTestPlus {
 bytes32(uint256(5)),
 new bytes(0)
 );
+hevm.stopPrank();
 assertBoolEq(rollup.isBatchFinalized(2), true);
 assertEq(rollup.finalizedStateRoots(2), bytes32(uint256(4)));
 assertEq(rollup.withdrawRoots(2), bytes32(uint256(5)));
@@ -489,7 +539,7 @@ contract ScrollChainTest is DSTestPlus {
 rollup.revertBatch(new bytes(89), 1);
 hevm.stopPrank();

-rollup.addSequencer(address(this));
+rollup.addSequencer(address(0));

 bytes memory batchHeader0 = new bytes(89);

@@ -507,7 +557,9 @@ contract ScrollChainTest is DSTestPlus {
 chunk0 = new bytes(1 + 60);
 chunk0[0] = bytes1(uint8(1)); // one block in this chunk
 chunks[0] = chunk0;
+hevm.startPrank(address(0));
 rollup.commitBatch(0, batchHeader0, chunks, new bytes(0));
+hevm.stopPrank();

 bytes memory batchHeader1 = new bytes(89);
 assembly {
@@ -520,7 +572,9 @@ contract ScrollChainTest is DSTestPlus {
 }

 // commit another batch
+hevm.startPrank(address(0));
 rollup.commitBatch(0, batchHeader1, chunks, new bytes(0));
+hevm.stopPrank();

 // count must be nonzero, revert
 hevm.expectRevert("count must be nonzero");
@@ -563,7 +617,11 @@ contract ScrollChainTest is DSTestPlus {
 rollup.removeSequencer(_sequencer);
 hevm.stopPrank();

-// change to random operator
+hevm.expectRevert("not EOA");
+rollup.addSequencer(address(this));
+hevm.assume(_sequencer.code.length == 0);
+
+// change to random EOA operator
 hevm.expectEmit(true, false, false, true);
 emit UpdateSequencer(_sequencer, true);

@@ -586,7 +644,11 @@ contract ScrollChainTest is DSTestPlus {
 rollup.removeProver(_prover);
 hevm.stopPrank();

-// change to random operator
+hevm.expectRevert("not EOA");
+rollup.addProver(address(this));
+hevm.assume(_prover.code.length == 0);
+
+// change to random EOA operator
 hevm.expectEmit(true, false, false, true);
 emit UpdateProver(_prover, true);

@@ -601,8 +663,8 @@ contract ScrollChainTest is DSTestPlus {
 }

 function testSetPause() external {
-rollup.addSequencer(address(this));
-rollup.addProver(address(this));
+rollup.addSequencer(address(0));
+rollup.addProver(address(0));

 // not owner, revert
 hevm.startPrank(address(1));
@@ -614,10 +676,12 @@ contract ScrollChainTest is DSTestPlus {
 rollup.setPause(true);
 assertBoolEq(true, rollup.paused());

+hevm.startPrank(address(0));
 hevm.expectRevert("Pausable: paused");
 rollup.commitBatch(0, new bytes(0), new bytes[](0), new bytes(0));
 hevm.expectRevert("Pausable: paused");
 rollup.finalizeBatchWithProof(new bytes(0), bytes32(0), bytes32(0), bytes32(0), new bytes(0));
+hevm.stopPrank();

 // unpause
 rollup.setPause(false);
@@ -1,6 +1,5 @@
 .PHONY: lint docker clean coordinator coordinator_skip_libzkp mock_coordinator

-IMAGE_NAME=coordinator
 IMAGE_VERSION=latest
 REPO_ROOT_DIR=./..

@@ -22,14 +21,20 @@ libzkp:
 rm -rf ./internal/logic/verifier/lib && cp -r ../common/libzkp/interface ./internal/logic/verifier/lib
 find ../common | grep libzktrie.so | xargs -I{} cp {} ./internal/logic/verifier/lib

-coordinator: libzkp ## Builds the Coordinator instance.
-	go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator ./cmd
+coordinator_api: libzkp ## Builds the Coordinator api instance.
+	go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_api ./cmd/api

-coordinator_skip_libzkp:
-	go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator ./cmd
+coordinator_cron:
+	go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_cron ./cmd/cron

-mock_coordinator: ## Builds the mocked Coordinator instance.
-	go build -tags="mock_prover mock_verifier" -o $(PWD)/build/bin/coordinator ./cmd
+coordinator_api_skip_libzkp:
+	go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator_api ./cmd/api

+mock_coordinator_api: ## Builds the mocked Coordinator instance.
+	go build -tags="mock_prover mock_verifier" -o $(PWD)/build/bin/coordinator_api ./cmd/api

+mock_coordinator_cron: ## Builds the mocked Coordinator instance.
+	go build -tags="mock_prover mock_verifier" -o $(PWD)/build/bin/coordinator_cron ./cmd/cron

 test-verifier: libzkp
 	go test -tags ffi -timeout 0 -v ./internal/logic/verifier
@@ -45,7 +50,9 @@ clean: ## Empty out the bin folder
 @rm -rf build/bin

 docker:
-	DOCKER_BUILDKIT=1 docker build -t scrolltech/${IMAGE_NAME}:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/coordinator.Dockerfile
+	DOCKER_BUILDKIT=1 docker build -t scrolltech/coordinator-api:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/coordinator-api.Dockerfile
+	DOCKER_BUILDKIT=1 docker build -t scrolltech/coordinator-cron:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/coordinator-cron.Dockerfile

 docker_push:
-	docker push scrolltech/${IMAGE_NAME}:${IMAGE_VERSION}
+	docker push scrolltech/coordinator-api:${IMAGE_VERSION}
+	docker push scrolltech/coordinator-cron:${IMAGE_VERSION}
@@ -12,7 +12,8 @@ See [monorepo prerequisites](../README.md#prerequisites).

 ```bash
 make clean
-make coordinator
+make coordinator_api
+make coordinator_cron
 ```
 The built coordinator binary is in the `build/bin` directory.

@@ -44,13 +45,15 @@ The coordinator behavior can be configured using [`config.json`](config.json). C

 * Using default ports and config.json:
 ```bash
-./build/bin/coordinator --http
+./build/bin/coordinator_api --http
+./build/bin/coordinator_cron
 ```

 * Using manually specified ports and config.json:
 ```bash
-./build/bin/coordinator --config ./config.json --http --http.addr localhost --http.port 8390
+./build/bin/coordinator_api --config ./config.json --http --http.addr localhost --http.port 8390
+./build/bin/coordinator_cron --config ./config.json
 ```

-* For other flags, refer to [`cmd/app/flags.go`](cmd/app/flags.go).
+* For other flags, refer to [`cmd/api/app/flags.go`](cmd/api/app/flags.go).
@@ -22,7 +22,6 @@ import (

 "scroll-tech/coordinator/internal/config"
 "scroll-tech/coordinator/internal/controller/api"
-"scroll-tech/coordinator/internal/controller/cron"
 "scroll-tech/coordinator/internal/route"
 )

@@ -41,7 +40,7 @@ func init() {
 return utils.LogSetup(ctx)
 }
 // Register `coordinator-test` app for integration-test.
-utils.RegisterSimulation(app, utils.CoordinatorApp)
+utils.RegisterSimulation(app, utils.CoordinatorAPIApp)
 }

 func action(ctx *cli.Context) error {
@@ -51,28 +50,23 @@ func action(ctx *cli.Context) error {
 log.Crit("failed to load config file", "config file", cfgFile, "error", err)
 }

-subCtx, cancel := context.WithCancel(ctx.Context)
 db, err := database.InitDB(cfg.DB)
 if err != nil {
 log.Crit("failed to init db connection", "err", err)
 }

-registry := prometheus.DefaultRegisterer
-observability.Server(ctx, db)
-
-proofCollector := cron.NewCollector(subCtx, db, cfg, registry)
 defer func() {
-proofCollector.Stop()
-cancel()
 if err = database.CloseDB(db); err != nil {
 log.Error("can not close db connection", "error", err)
 }
 }()

+registry := prometheus.DefaultRegisterer
+observability.Server(ctx, db)
+
 apiSrv := apiServer(ctx, cfg, db, registry)

 log.Info(
-"coordinator start successfully",
+"Start coordinator api successfully.",
 "version", version.Version,
 )
@@ -10,7 +10,7 @@ import (
 )

 func TestRunCoordinator(t *testing.T) {
-coordinator := cmd.NewCmd("coordinator-test", "--version")
+coordinator := cmd.NewCmd("coordinator-api-test", "--version")
 defer coordinator.WaitExit()

 // wait result
@@ -8,10 +8,6 @@ var (
 &httpEnabledFlag,
 &httpListenAddrFlag,
 &httpPortFlag,
-// ws flags
-&wsEnabledFlag,
-&wsListenAddrFlag,
-&wsPortFlag,
 }
 // httpEnabledFlag enable rpc server.
 httpEnabledFlag = cli.BoolFlag{
@@ -31,19 +27,4 @@ var (
 Usage: "HTTP-RPC server listening port",
 Value: 8390,
 }
-wsEnabledFlag = cli.BoolFlag{
-Name: "ws",
-Usage: "Enable the WS-RPC server",
-}
-wsListenAddrFlag = cli.StringFlag{
-Name: "ws.addr",
-Usage: "WS-RPC server listening interface",
-Value: "localhost",
-}
-// websocket port
-wsPortFlag = cli.IntFlag{
-Name: "ws.port",
-Usage: "WS-RPC server listening port",
-Value: 8391,
-}
 )
@@ -55,8 +55,8 @@ func NewCoordinatorApp(base *docker.App, file string) *CoordinatorApp {

 // RunApp run coordinator-test child process by multi parameters.
 func (c *CoordinatorApp) RunApp(t *testing.T, args ...string) {
-c.AppAPI = cmd.NewCmd(string(utils.CoordinatorApp), append(c.args, args...)...)
-c.AppAPI.RunApp(func() bool { return c.AppAPI.WaitResult(t, time.Second*20, "Start coordinator successfully") })
+c.AppAPI = cmd.NewCmd(string(utils.CoordinatorAPIApp), append(c.args, args...)...)
+c.AppAPI.RunApp(func() bool { return c.AppAPI.WaitResult(t, time.Second*20, "Start coordinator api successfully") })
 }

 // Free stop and release coordinator-test.
coordinator/cmd/api/main.go (new file, 7 lines)
@@ -0,0 +1,7 @@
package main

import "scroll-tech/coordinator/cmd/api/app"

func main() {
	app.Run()
}
coordinator/cmd/cron/app/app.go (new file, 87 lines)
@@ -0,0 +1,87 @@
package app

import (
	"context"
	"fmt"
	"os"
	"os/signal"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/scroll-tech/go-ethereum/log"
	"github.com/urfave/cli/v2"

	"scroll-tech/common/database"
	"scroll-tech/common/observability"
	"scroll-tech/common/utils"
	"scroll-tech/common/version"

	"scroll-tech/coordinator/internal/config"
	"scroll-tech/coordinator/internal/controller/cron"
)

var app *cli.App

func init() {
	// Set up coordinator app info.
	app = cli.NewApp()
	app.Action = action
	app.Name = "coordinator cron"
	app.Usage = "The Scroll L2 Coordinator cron"
	app.Version = version.Version
	app.Flags = append(app.Flags, utils.CommonFlags...)
	app.Before = func(ctx *cli.Context) error {
		return utils.LogSetup(ctx)
	}
	// Register `coordinator-cron-test` app for integration-cron-test.
	utils.RegisterSimulation(app, utils.CoordinatorCronApp)
}

func action(ctx *cli.Context) error {
	cfgFile := ctx.String(utils.ConfigFileFlag.Name)
	cfg, err := config.NewConfig(cfgFile)
	if err != nil {
		log.Crit("failed to load config file", "config file", cfgFile, "error", err)
	}

	subCtx, cancel := context.WithCancel(ctx.Context)
	db, err := database.InitDB(cfg.DB)
	if err != nil {
		log.Crit("failed to init db connection", "err", err)
	}

	registry := prometheus.DefaultRegisterer
	observability.Server(ctx, db)

	proofCollector := cron.NewCollector(subCtx, db, cfg, registry)
	defer func() {
		proofCollector.Stop()
		cancel()
		if err = database.CloseDB(db); err != nil {
			log.Error("can not close db connection", "error", err)
		}
	}()

	log.Info(
		"coordinator cron start successfully",
		"version", version.Version,
	)

	// Catch CTRL-C to ensure a graceful shutdown.
	interrupt := make(chan os.Signal, 1)
	signal.Notify(interrupt, os.Interrupt)

	// Wait until the interrupt signal is received from an OS signal.
	<-interrupt

	log.Info("coordinator cron exiting success")
	return nil
}

// Run coordinator.
func Run() {
	// RunApp the coordinator.
	if err := app.Run(os.Args); err != nil {
		_, _ = fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
coordinator/cmd/cron/app/app_test.go (new file, 19 lines)
@@ -0,0 +1,19 @@
package app

import (
	"fmt"
	"testing"
	"time"

	"scroll-tech/common/cmd"
	"scroll-tech/common/version"
)

func TestRunCoordinatorCron(t *testing.T) {
	coordinator := cmd.NewCmd("coordinator-cron-test", "--version")
	defer coordinator.WaitExit()

	// wait result
	coordinator.ExpectWithTimeout(t, true, time.Second*3, fmt.Sprintf("coordinator cron version %s", version.Version))
	coordinator.RunApp(nil)
}
coordinator/cmd/cron/main.go (new file, 7 lines)
@@ -0,0 +1,7 @@
package main

import "scroll-tech/coordinator/cmd/cron/app"

func main() {
	app.Run()
}
@@ -1,7 +0,0 @@
package main

import "scroll-tech/coordinator/cmd/app"

func main() {
	app.Run()
}
@@ -76,7 +76,7 @@ func NewCollector(ctx context.Context, db *gorm.DB, cfg *config.Config, reg prom
 go c.checkBatchAllChunkReady()
 go c.cleanupChallenge()

-log.Info("Start coordinator successfully.")
+log.Info("Start coordinator cron successfully.")

 return c
 }
@@ -4,7 +4,6 @@ import (
 "context"
 "crypto/ecdsa"
 "fmt"
-"net/http"
 "sync"
 "time"

@@ -35,13 +34,13 @@ func NewCoordinatorClient(cfg *config.CoordinatorConfig, proverName string, priv
 SetRetryCount(cfg.RetryCount).
 SetRetryWaitTime(time.Duration(cfg.RetryWaitTimeSec) * time.Second).
 SetBaseURL(cfg.BaseURL).
-AddRetryCondition(func(r *resty.Response, _ error) bool {
-// Check for HTTP 5xx errors, e.g., coordinator is restarting.
-if r.StatusCode() >= http.StatusInternalServerError {
-log.Warn("Received unexpected HTTP response. Retrying...", "status code", r.StatusCode())
+AddRetryAfterErrorCondition().
+AddRetryCondition(func(response *resty.Response, err error) bool {
+if err != nil {
+log.Warn("Encountered an error while sending the request. Retrying...", "error", err)
 return true
 }
-return false
+return response.IsError()
 })

 log.Info("successfully initialized prover client",
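Read as a whole, the change drops the hand-rolled 5xx check in favor of resty's built-in conditions. A consolidated sketch of the resulting client setup, assuming resty v2 and with unrelated options omitted (`newRestyClient` and its parameters are illustrative names, not the actual constructor):

```go
package client

import (
	"time"

	"github.com/go-resty/resty/v2"
	"github.com/scroll-tech/go-ethereum/log"
)

func newRestyClient(retryCount int, retryWaitSec int64, baseURL string) *resty.Client {
	return resty.New().
		SetRetryCount(retryCount).
		SetRetryWaitTime(time.Duration(retryWaitSec) * time.Second).
		SetBaseURL(baseURL).
		// Honor server-driven back-off on error responses.
		AddRetryAfterErrorCondition().
		AddRetryCondition(func(response *resty.Response, err error) bool {
			if err != nil {
				// Transport-level failure: always retry.
				log.Warn("Encountered an error while sending the request. Retrying...", "error", err)
				return true
			}
			// IsError is true for any 4xx/5xx status, broader than the old >=500 check.
			return response.IsError()
		})
}
```

Note the behavioral widening: the old condition retried only on status codes >= 500, while `response.IsError()` also retries on 4xx responses, and transport errors (which previously fell through the `_ error` parameter) now trigger retries as well.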
@@ -49,6 +49,7 @@ func NewProverCore(cfg *config.ProverCoreConfig) (*ProverCore, error) {
 C.init_chunk_prover(paramsPathStr, assetsPathStr)
 rawVK = C.get_chunk_vk()
 }
+defer C.free_c_chars(rawVK)

 if rawVK != nil {
 vk = C.GoString(rawVK)
@@ -161,7 +162,7 @@ func (p *ProverCore) checkChunkProofs(chunkProofsByt []byte) (bool, error) {

 log.Info("Start to check chunk proofs ...")
 cResult := C.check_chunk_proofs(chunkProofsStr)
-defer C.free(unsafe.Pointer(cResult))
+defer C.free_c_chars(cResult)
 log.Info("Finish checking chunk proofs!")

 var result CheckChunkProofsResponse
@@ -188,7 +189,7 @@ func (p *ProverCore) proveBatch(chunkInfosByt []byte, chunkProofsByt []byte) ([]

 log.Info("Start to create batch proof ...")
 bResult := C.gen_batch_proof(chunkInfosStr, chunkProofsStr)
-defer C.free(unsafe.Pointer(bResult))
+defer C.free_c_chars(bResult)
 log.Info("Finish creating batch proof!")

 var result ProofResult
@@ -210,7 +211,7 @@ func (p *ProverCore) proveChunk(tracesByt []byte) ([]byte, error) {

 log.Info("Start to create chunk proof ...")
 cProof := C.gen_chunk_proof(tracesStr)
-defer C.free(unsafe.Pointer(cProof))
+defer C.free_c_chars(cProof)
 log.Info("Finish creating chunk proof!")

 var result ProofResult
@@ -242,12 +243,10 @@ func (p *ProverCore) mayDumpProof(id string, proofByt []byte) error {

 func (p *ProverCore) tracesToChunkInfo(tracesByt []byte) []byte {
 tracesStr := C.CString(string(tracesByt))

-defer func() {
-C.free(unsafe.Pointer(tracesStr))
-}()
+defer C.free(unsafe.Pointer(tracesStr))

 cChunkInfo := C.block_traces_to_chunk_info(tracesStr)
+defer C.free_c_chars(cChunkInfo)

 chunkInfo := C.GoString(cChunkInfo)
 return []byte(chunkInfo)
File diff suppressed because one or more lines are too long
@@ -24,7 +24,6 @@
 "min_gas_price": 0,
 "gas_price_diff": 50000
 },
-"finalize_batch_interval_sec": 0,
 "gas_oracle_sender_private_key": "1313131313131313131313131313131313131313131313131313131313131313"
 }
 },
@@ -59,7 +58,8 @@
			"try_times": 5,
			"base_url": "http://localhost:8750"
		},
		"finalize_batch_interval_sec": 0,
		"enable_test_env_bypass_features": true,
		"finalize_batch_without_proof_timeout_sec": 7200,
		"gas_oracle_sender_private_key": "1313131313131313131313131313131313131313131313131313131313131313",
		"commit_sender_private_key": "1414141414141414141414141414141414141414141414141414141414141414",
		"finalize_sender_private_key": "1515151515151515151515151515151515151515151515151515151515151515",

@@ -64,6 +64,11 @@ type RelayerConfig struct {
	GasOracleSenderPrivateKey *ecdsa.PrivateKey `json:"-"`
	CommitSenderPrivateKey    *ecdsa.PrivateKey `json:"-"`
	FinalizeSenderPrivateKey  *ecdsa.PrivateKey `json:"-"`

	// Indicates if bypass features specific to testing environments are enabled.
	EnableTestEnvBypassFeatures bool `json:"enable_test_env_bypass_features"`
	// The timeout in seconds for finalizing a batch without proof, only used when EnableTestEnvBypassFeatures is true.
	FinalizeBatchWithoutProofTimeoutSec uint64 `json:"finalize_batch_without_proof_timeout_sec"`
}

// GasOracleConfig The config for updating gas price oracle.

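Note: since the three sender keys are tagged `json:"-"`, the corresponding keys in the JSON above (`gas_oracle_sender_private_key` and friends) cannot be decoded by the standard unmarshaller; presumably a custom `UnmarshalJSON` parses the hex strings into `*ecdsa.PrivateKey`. A sketch of that approach, assuming go-ethereum's `crypto.HexToECDSA`; the alias field names follow the config file, but the real loader may differ:

```go
package config

import (
	"crypto/ecdsa"
	"encoding/json"

	"github.com/scroll-tech/go-ethereum/crypto"
)

// Trimmed-down stand-in for the RelayerConfig above; only one key shown.
type RelayerConfig struct {
	GasOracleSenderPrivateKey *ecdsa.PrivateKey `json:"-"`
}

// relayerConfigAlias receives the raw hex string from the JSON file.
type relayerConfigAlias struct {
	GasOracleSenderPrivateKey string `json:"gas_oracle_sender_private_key"`
}

// UnmarshalJSON sketches how the hex key could be parsed into an
// *ecdsa.PrivateKey; this is an assumption about the loader, not the real code.
func (c *RelayerConfig) UnmarshalJSON(data []byte) error {
	var alias relayerConfigAlias
	if err := json.Unmarshal(data, &alias); err != nil {
		return err
	}
	key, err := crypto.HexToECDSA(alias.GasOracleSenderPrivateKey)
	if err != nil {
		return err
	}
	c.GasOracleSenderPrivateKey = key
	return nil
}
```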
@@ -50,6 +50,11 @@ func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfi
		return nil, fmt.Errorf("new gas oracle sender failed for address %s, err: %v", addr.Hex(), err)
	}

	// Ensure test features aren't enabled on the mainnet.
	if gasOracleSender.GetChainID().Cmp(big.NewInt(1)) == 0 && cfg.EnableTestEnvBypassFeatures {
		return nil, fmt.Errorf("cannot enable test env features in mainnet")
	}

	var minGasPrice uint64
	var gasPriceDiff uint64
	if cfg.GasOracleConfig != nil {

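A note on the mainnet guard: `GetChainID()` returns a `*big.Int`, and `==` on two `*big.Int` values compares pointer identities rather than numeric values, so `Cmp` is the correct way to test for chain ID 1. A minimal demonstration:

```go
package main

import (
	"fmt"
	"math/big"
)

func main() {
	chainID := big.NewInt(1)

	// Pointer comparison: false, the two values are distinct allocations.
	fmt.Println(chainID == big.NewInt(1)) // false

	// Numeric comparison: Cmp returns 0 when the values are equal.
	fmt.Println(chainID.Cmp(big.NewInt(1)) == 0) // true
}
```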
@@ -19,6 +19,7 @@ import (
	"gorm.io/gorm"

	"scroll-tech/common/types"
	"scroll-tech/common/utils"

	bridgeAbi "scroll-tech/rollup/abi"
	"scroll-tech/rollup/internal/config"
@@ -88,6 +89,11 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
		return nil, fmt.Errorf("new gas oracle sender failed for address %s, err: %w", addr.Hex(), err)
	}

	// Ensure test features aren't enabled on the mainnet.
	if commitSender.GetChainID().Cmp(big.NewInt(1)) == 0 && cfg.EnableTestEnvBypassFeatures {
		return nil, fmt.Errorf("cannot enable test env features in mainnet")
	}

	var minGasPrice uint64
	var gasPriceDiff uint64
	if cfg.GasOracleConfig != nil {
@@ -434,108 +440,27 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
	r.metrics.rollupL2RelayerProcessCommittedBatchesTotal.Inc()

	batch := batches[0]
	hash := batch.Hash
	status := types.ProvingStatus(batch.ProvingStatus)
	switch status {
	case types.ProvingTaskUnassigned, types.ProvingTaskAssigned:
		// The proof for this block is not ready yet.
		return
		if batch.CommittedAt == nil {
			log.Error("batch.CommittedAt is nil", "index", batch.Index, "hash", batch.Hash)
			return
		}

		if r.cfg.EnableTestEnvBypassFeatures && utils.NowUTC().Sub(*batch.CommittedAt) > time.Duration(r.cfg.FinalizeBatchWithoutProofTimeoutSec)*time.Second {
			if err := r.finalizeBatch(batch, false); err != nil {
				log.Error("Failed to finalize timeout batch without proof", "index", batch.Index, "hash", batch.Hash, "err", err)
			}
		}

	case types.ProvingTaskVerified:
		log.Info("Start to roll up zk proof", "hash", hash)
		log.Info("Start to roll up zk proof", "hash", batch.Hash)
		r.metrics.rollupL2RelayerProcessCommittedBatchesFinalizedTotal.Inc()

		// Check batch status before send `finalizeBatchWithProof` tx.
		if r.cfg.ChainMonitor.Enabled {
			var batchStatus bool
			batchStatus, err = r.getBatchStatusByIndex(batch.Index)
			if err != nil {
				r.metrics.rollupL2ChainMonitorLatestFailedCall.Inc()
				log.Warn("failed to get batch status, please check chain_monitor api server", "batch_index", batch.Index, "err", err)
				return
			}
			if !batchStatus {
				r.metrics.rollupL2ChainMonitorLatestFailedBatchStatus.Inc()
				log.Error("the batch status is not right, stop finalize batch and check the reason", "batch_index", batch.Index)
				return
			}
		if err := r.finalizeBatch(batch, true); err != nil {
			log.Error("Failed to finalize batch with proof", "index", batch.Index, "hash", batch.Hash, "err", err)
		}

		var parentBatchStateRoot string
		if batch.Index > 0 {
			var parentBatch *orm.Batch
			parentBatch, err = r.batchOrm.GetBatchByIndex(r.ctx, batch.Index-1)
			// handle unexpected db error
			if err != nil {
				log.Error("Failed to get batch", "index", batch.Index-1, "err", err)
				return
			}
			parentBatchStateRoot = parentBatch.StateRoot
		}

		aggProof, err := r.batchOrm.GetVerifiedProofByHash(r.ctx, hash)
		if err != nil {
			log.Error("get verified proof by hash failed", "hash", hash, "err", err)
			return
		}

		if err = aggProof.SanityCheck(); err != nil {
			log.Error("agg_proof sanity check fails", "hash", hash, "error", err)
			return
		}

		data, err := r.l1RollupABI.Pack(
			"finalizeBatchWithProof",
			batch.BatchHeader,
			common.HexToHash(parentBatchStateRoot),
			common.HexToHash(batch.StateRoot),
			common.HexToHash(batch.WithdrawRoot),
			aggProof.Proof,
		)
		if err != nil {
			log.Error("Pack finalizeBatchWithProof failed", "err", err)
			return
		}

		txID := hash + "-finalize"
		// add suffix `-finalize` to avoid duplication with commit tx in unit tests
		txHash, err := r.finalizeSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), data, 0)
		finalizeTxHash := &txHash
		if err != nil {
			if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
				// This can happen normally if we try to finalize 2 or more
				// batches around the same time. The 2nd tx might fail since
				// the client does not see the 1st tx's updates at this point.
				// TODO: add more fine-grained error handling
				log.Error(
					"finalizeBatchWithProof in layer1 failed",
					"index", batch.Index,
					"hash", batch.Hash,
					"RollupContractAddress", r.cfg.RollupContractAddress,
					"err", err,
				)
				log.Debug(
					"finalizeBatchWithProof in layer1 failed",
					"index", batch.Index,
					"hash", batch.Hash,
					"RollupContractAddress", r.cfg.RollupContractAddress,
					"calldata", common.Bytes2Hex(data),
					"err", err,
				)
			}
			return
		}
		log.Info("finalizeBatchWithProof in layer1", "index", batch.Index, "batch hash", batch.Hash, "tx hash", hash)

		// record and sync with db, @todo handle db error
		err = r.batchOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, hash, finalizeTxHash.String(), types.RollupFinalizing)
		if err != nil {
			log.Error("UpdateFinalizeTxHashAndRollupStatus failed",
				"index", batch.Index, "batch hash", batch.Hash,
				"tx hash", finalizeTxHash.String(), "err", err)
		}
		r.processingFinalization.Store(txID, hash)
		r.metrics.rollupL2RelayerProcessCommittedBatchesFinalizedSuccessTotal.Inc()

	case types.ProvingTaskFailed:
		// We were unable to prove this batch. There are two possibilities:
		// (a) Prover bug. In this case, we should fix and redeploy the prover.
@@ -553,13 +478,121 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
			"ProvedAt", batch.ProvedAt,
			"ProofTimeSec", batch.ProofTimeSec,
		)
		return

	default:
		log.Error("encounter unreachable case in ProcessCommittedBatches", "proving status", status)
	}
}

func (r *Layer2Relayer) finalizeBatch(batch *orm.Batch, withProof bool) error {
	// Check batch status before sending the `finalizeBatchWithProof` tx.
	if r.cfg.ChainMonitor.Enabled {
		var batchStatus bool
		batchStatus, err := r.getBatchStatusByIndex(batch.Index)
		if err != nil {
			r.metrics.rollupL2ChainMonitorLatestFailedCall.Inc()
			log.Warn("failed to get batch status, please check chain_monitor api server", "batch_index", batch.Index, "err", err)
			return err
		}
		if !batchStatus {
			r.metrics.rollupL2ChainMonitorLatestFailedBatchStatus.Inc()
			log.Error("the batch status is not right, stop finalize batch and check the reason", "batch_index", batch.Index)
			return fmt.Errorf("the batch status is not right, batch_index: %d", batch.Index)
		}
	}

	var parentBatchStateRoot string
	if batch.Index > 0 {
		var parentBatch *orm.Batch
		parentBatch, err := r.batchOrm.GetBatchByIndex(r.ctx, batch.Index-1)
		// handle unexpected db error
		if err != nil {
			log.Error("Failed to get batch", "index", batch.Index-1, "err", err)
			return err
		}
		parentBatchStateRoot = parentBatch.StateRoot
	}

	var txCalldata []byte
	if withProof {
		aggProof, err := r.batchOrm.GetVerifiedProofByHash(r.ctx, batch.Hash)
		if err != nil {
			log.Error("get verified proof by hash failed", "hash", batch.Hash, "err", err)
			return err
		}

		if err = aggProof.SanityCheck(); err != nil {
			log.Error("agg_proof sanity check fails", "hash", batch.Hash, "error", err)
			return err
		}

		txCalldata, err = r.l1RollupABI.Pack(
			"finalizeBatchWithProof",
			batch.BatchHeader,
			common.HexToHash(parentBatchStateRoot),
			common.HexToHash(batch.StateRoot),
			common.HexToHash(batch.WithdrawRoot),
			aggProof.Proof,
		)
		if err != nil {
			log.Error("Pack finalizeBatchWithProof failed", "err", err)
			return err
		}
	} else {
		var err error
		txCalldata, err = r.l1RollupABI.Pack(
			"finalizeBatch",
			batch.BatchHeader,
			common.HexToHash(parentBatchStateRoot),
			common.HexToHash(batch.StateRoot),
			common.HexToHash(batch.WithdrawRoot),
		)
		if err != nil {
			log.Error("Pack finalizeBatch failed", "err", err)
			return err
		}
	}

	txID := batch.Hash + "-finalize"
	// add suffix `-finalize` to avoid duplication with commit tx in unit tests
	txHash, err := r.finalizeSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), txCalldata, 0)
	finalizeTxHash := &txHash
	if err != nil {
		if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
			// This can happen normally if we try to finalize 2 or more
			// batches around the same time. The 2nd tx might fail since
			// the client does not see the 1st tx's updates at this point.
			// TODO: add more fine-grained error handling
			log.Error(
				"finalizeBatchWithProof in layer1 failed",
				"index", batch.Index,
				"hash", batch.Hash,
				"RollupContractAddress", r.cfg.RollupContractAddress,
				"err", err,
			)
			log.Debug(
				"finalizeBatchWithProof in layer1 failed",
				"index", batch.Index,
				"hash", batch.Hash,
				"RollupContractAddress", r.cfg.RollupContractAddress,
				"calldata", common.Bytes2Hex(txCalldata),
				"err", err,
			)
		}
		return err
	}
	log.Info("finalizeBatch in layer1", "index", batch.Index, "batch hash", batch.Hash, "tx hash", finalizeTxHash.String(), "with proof", withProof)

	// record and sync with db, @todo handle db error
	if err := r.batchOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, batch.Hash, finalizeTxHash.String(), types.RollupFinalizing); err != nil {
		log.Error("UpdateFinalizeTxHashAndRollupStatus failed", "index", batch.Index, "batch hash", batch.Hash, "tx hash", finalizeTxHash.String(), "err", err)
		return err
	}
	r.processingFinalization.Store(txID, batch.Hash)
	r.metrics.rollupL2RelayerProcessCommittedBatchesFinalizedSuccessTotal.Inc()
	return nil
}

// batchStatusResponse the response schema
type batchStatusResponse struct {
	ErrCode int `json:"errcode"`

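For context, `getBatchStatusByIndex` queries the external chain-monitor service and decodes a response shaped like the struct above; only the `errcode` field appears in this hunk, so the remaining fields and the endpoint path in this sketch are assumptions:

```go
package relayer

import (
	"encoding/json"
	"fmt"
	"net/http"
)

// batchStatusResponse mirrors the schema above; ErrMsg and Data are assumed fields.
type batchStatusResponse struct {
	ErrCode int    `json:"errcode"`
	ErrMsg  string `json:"errmsg"`
	Data    bool   `json:"data"`
}

// getBatchStatus is a hedged sketch of calling a chain-monitor style API;
// the URL shape is illustrative, not the actual endpoint.
func getBatchStatus(baseURL string, batchIndex uint64) (bool, error) {
	resp, err := http.Get(fmt.Sprintf("%s/v1/batch_status?batch_index=%d", baseURL, batchIndex))
	if err != nil {
		return false, err
	}
	defer resp.Body.Close()

	var result batchStatusResponse
	if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
		return false, err
	}
	if result.ErrCode != 0 {
		return false, fmt.Errorf("chain monitor returned errcode %d: %s", result.ErrCode, result.ErrMsg)
	}
	return result.Data, nil
}
```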
@@ -121,6 +121,37 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
	assert.Equal(t, types.RollupFinalizing, statuses[0])
}

func testL2RelayerFinalizeTimeoutBatches(t *testing.T) {
	db := setupL2RelayerDB(t)
	defer database.CloseDB(db)

	l2Cfg := cfg.L2Config
	l2Cfg.RelayerConfig.EnableTestEnvBypassFeatures = true
	l2Cfg.RelayerConfig.FinalizeBatchWithoutProofTimeoutSec = 0
	relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false, nil)
	assert.NoError(t, err)
	batchMeta := &types.BatchMeta{
		StartChunkIndex: 0,
		StartChunkHash:  chunkHash1.Hex(),
		EndChunkIndex:   1,
		EndChunkHash:    chunkHash2.Hex(),
	}
	batchOrm := orm.NewBatch(db)
	batch, err := batchOrm.InsertBatch(context.Background(), []*types.Chunk{chunk1, chunk2}, batchMeta)
	assert.NoError(t, err)

	err = batchOrm.UpdateRollupStatus(context.Background(), batch.Hash, types.RollupCommitted)
	assert.NoError(t, err)

	// Check the database for the updated status using TryTimes.
	ok := utils.TryTimes(5, func() bool {
		relayer.ProcessCommittedBatches()
		statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
		return err == nil && len(statuses) == 1 && statuses[0] == types.RollupFinalizing
	})
	assert.True(t, ok)
}

func testL2RelayerCommitConfirm(t *testing.T) {
	db := setupL2RelayerDB(t)
	defer database.CloseDB(db)

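The test above polls with `utils.TryTimes` rather than asserting once, since the finalization status lands in the DB asynchronously relative to `ProcessCommittedBatches`. A sketch of what a `TryTimes` helper of this shape plausibly does; the real implementation in `scroll-tech/common/utils` may differ, e.g. in its sleep interval:

```go
package utils

import "time"

// TryTimes retries fn up to n times, returning true as soon as one
// attempt succeeds; the one-second retry interval is an assumption.
func TryTimes(n int, fn func() bool) bool {
	for i := 0; i < n; i++ {
		if fn() {
			return true
		}
		time.Sleep(time.Second)
	}
	return false
}
```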
@@ -99,6 +99,7 @@ func TestFunctions(t *testing.T) {
	t.Run("TestCreateNewRelayer", testCreateNewRelayer)
	t.Run("TestL2RelayerProcessPendingBatches", testL2RelayerProcessPendingBatches)
	t.Run("TestL2RelayerProcessCommittedBatches", testL2RelayerProcessCommittedBatches)
	t.Run("TestL2RelayerFinalizeTimeoutBatches", testL2RelayerFinalizeTimeoutBatches)
	t.Run("TestL2RelayerCommitConfirm", testL2RelayerCommitConfirm)
	t.Run("TestL2RelayerFinalizeConfirm", testL2RelayerFinalizeConfirm)
	t.Run("TestL2RelayerGasOracleConfirm", testL2RelayerGasOracleConfirm)

@@ -165,6 +165,11 @@ func (s *Sender) IsFull() bool {
	return s.pendingTxs.Count() >= s.config.PendingLimit
}

// GetChainID returns the chain ID associated with the sender.
func (s *Sender) GetChainID() *big.Int {
	return s.chainID
}

// Stop stops the sender module.
func (s *Sender) Stop() {
	close(s.stopCh)

@@ -2,6 +2,7 @@ package watcher

import (
	"context"
	"errors"
	"math/big"

	"github.com/prometheus/client_golang/prometheus"
@@ -116,52 +117,39 @@ func (w *L1WatcherClient) SetConfirmations(confirmations rpc.BlockNumber) {
// FetchBlockHeader pulls the latest L1 block header and saves it in the DB
func (w *L1WatcherClient) FetchBlockHeader(blockHeight uint64) error {
	w.metrics.l1WatcherFetchBlockHeaderTotal.Inc()
	fromBlock := int64(w.processedBlockHeight) + 1
	toBlock := int64(blockHeight)
	if toBlock < fromBlock {
		return nil
	}
	if toBlock > fromBlock+contractEventsBlocksFetchLimit {
		toBlock = fromBlock + contractEventsBlocksFetchLimit - 1
	}

	var blocks []orm.L1Block
	var err error
	height := fromBlock
	for ; height <= toBlock; height++ {
		var block *gethTypes.Header
		block, err = w.client.HeaderByNumber(w.ctx, big.NewInt(height))
		if err != nil {
			log.Warn("Failed to get block", "height", height, "err", err)
			break
		}
		var baseFee uint64
		if block.BaseFee != nil {
			baseFee = block.BaseFee.Uint64()
		}
		blocks = append(blocks, orm.L1Block{
			Number:          uint64(height),
			Hash:            block.Hash().String(),
			BaseFee:         baseFee,
			GasOracleStatus: int16(types.GasOraclePending),
		})
	}

	// failed at first block, return with the error
	if height == fromBlock {
	var block *gethTypes.Header
	block, err := w.client.HeaderByNumber(w.ctx, big.NewInt(int64(blockHeight)))
	if err != nil {
		log.Warn("Failed to get block", "height", blockHeight, "err", err)
		return err
	}
	toBlock = height - 1

	// insert succeed blocks
	err = w.l1BlockOrm.InsertL1Blocks(w.ctx, blocks)
	if block == nil {
		log.Warn("Received nil block", "height", blockHeight)
		return errors.New("received nil block")
	}

	var baseFee uint64
	if block.BaseFee != nil {
		baseFee = block.BaseFee.Uint64()
	}

	l1Block := orm.L1Block{
		Number:          blockHeight,
		Hash:            block.Hash().String(),
		BaseFee:         baseFee,
		GasOracleStatus: int16(types.GasOraclePending),
	}

	err = w.l1BlockOrm.InsertL1Blocks(w.ctx, []orm.L1Block{l1Block})
	if err != nil {
		log.Warn("Failed to insert L1 block to db", "fromBlock", fromBlock, "toBlock", toBlock, "err", err)
		log.Warn("Failed to insert L1 block to db", "blockHeight", blockHeight, "err", err)
		return err
	}

	// update processed height
	w.processedBlockHeight = uint64(toBlock)
	w.processedBlockHeight = blockHeight
	w.metrics.l1WatcherFetchBlockHeaderProcessedBlockHeight.Set(float64(w.processedBlockHeight))
	return nil
}

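The rewrite replaces the range fetch (`fromBlock`..`toBlock` bounded by a fetch limit) with a single `HeaderByNumber` call for the requested height, so the partial-failure bookkeeping disappears. A hedged sketch of how a caller might drive it; the ticker interval and the confirmations arithmetic are assumptions about the wiring, not the actual watcher loop:

```go
package watcher

import (
	"context"
	"time"

	"github.com/scroll-tech/go-ethereum/ethclient"
	"github.com/scroll-tech/go-ethereum/log"
)

// pollHeaders sketches a driver loop for FetchBlockHeader: fetch the
// latest L1 height, back off by the confirmation depth, and hand the
// resulting height to the watcher.
func pollHeaders(ctx context.Context, client *ethclient.Client, w *L1WatcherClient, confirmations uint64) {
	ticker := time.NewTicker(10 * time.Second) // interval is an assumption
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			return
		case <-ticker.C:
			number, err := client.BlockNumber(ctx) // latest L1 height
			if err != nil || number < confirmations {
				log.Warn("failed to get a usable latest block number", "err", err)
				continue
			}
			if err := w.FetchBlockHeader(number - confirmations); err != nil {
				log.Warn("failed to fetch L1 block header", "err", err)
			}
		}
	}
}
```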
@@ -9,6 +9,7 @@ import (

	"scroll-tech/common/types"
	"scroll-tech/common/types/message"
	"scroll-tech/common/utils"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/log"
@@ -355,9 +356,9 @@ func (o *Batch) UpdateRollupStatus(ctx context.Context, hash string, status type

	switch status {
	case types.RollupCommitted:
		updateFields["committed_at"] = time.Now()
		updateFields["committed_at"] = utils.NowUTC()
	case types.RollupFinalized:
		updateFields["finalized_at"] = time.Now()
		updateFields["finalized_at"] = utils.NowUTC()
	}

	db := o.db

@@ -380,7 +381,7 @@ func (o *Batch) UpdateCommitTxHashAndRollupStatus(ctx context.Context, hash stri
	updateFields["commit_tx_hash"] = commitTxHash
	updateFields["rollup_status"] = int(status)
	if status == types.RollupCommitted {
		updateFields["committed_at"] = time.Now()
		updateFields["committed_at"] = utils.NowUTC()
	}

	db := o.db.WithContext(ctx)

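The switch from `time.Now()` to `utils.NowUTC()` makes the stored timestamps timezone-independent, which matters because `ProcessCommittedBatches` compares `CommittedAt` against `utils.NowUTC()` for the timeout bypass. Presumably the helper is just a UTC-normalized clock; a minimal sketch:

```go
package utils

import "time"

// NowUTC returns the current time normalized to UTC, so timestamps
// written by different services compare consistently.
func NowUTC() time.Time {
	return time.Now().UTC()
}
```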
Submodule rpc-gateway deleted from df9af42135
@@ -17,7 +17,7 @@ import (

	"scroll-tech/database/migrate"

	capp "scroll-tech/coordinator/cmd/app"
	capp "scroll-tech/coordinator/cmd/api/app"

	"scroll-tech/common/database"
	"scroll-tech/common/docker"