Compare commits

..

13 Commits

Author SHA1 Message Date
colin
0d8b00c3de fix(gas-oracle): nil pointer error and allow update gas price when no diff (#1657) 2025-05-12 12:35:42 +08:00
colin
826357ab5d fix(rollup-relayer): update commit status logic (#1656) 2025-05-10 18:46:36 +08:00
georgehao
d26381cba3 upgrade cargo chef to 0.1.71 (#1655) 2025-05-08 21:40:25 +08:00
georgehao
0e65686ce4 upgrade intermediate rust to 1.86.0 (#1654) 2025-05-08 19:30:13 +08:00
georgehao
6dd878eaca upgrade intermediate rust version (#1653) 2025-05-08 17:57:15 +08:00
colin
a18fe06440 feat: openvm v0.3.0 (#1648)
Co-authored-by: Velaciela <git.rover@outlook.com>
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2025-04-24 22:06:35 +08:00
colin
3ac69bec51 feat(rollup-relayer): add a tool to analyze chunk/batch/bundle proposing (#1645)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2025-04-23 13:41:03 +08:00
colin
e80f030246 feat(gas-oracle): add a warn log when entering fallback mode (#1646) 2025-04-16 18:05:14 +08:00
colin
a77f1413ee chore(gas-oracle): remove unused code (#1644) 2025-04-14 16:36:27 +08:00
colin
5b62692098 chore(gas-oracle): decommission L2 gas price oracle (#1643) 2025-04-14 15:35:23 +08:00
Morty
4f34e90f00 fix(zkvm-prover): update scroll-proving-sdk (#1642) 2025-04-14 13:38:52 +08:00
colin
6b1b822c81 fix(coordinator): set v4.4.56 as minimum prover version (#1641)
Co-authored-by: Morty <yiweichi1@gmail.com>
2025-04-11 16:35:39 +08:00
colin
a34c01d90b fix(coordinator): support darwin chunk provers (#1640)
Co-authored-by: colinlyguo <colinlyguo@users.noreply.github.com>
2025-04-09 19:04:20 +08:00
45 changed files with 1197 additions and 903 deletions

View File

@@ -42,6 +42,10 @@ jobs:
uses: Swatinem/rust-cache@v2
with:
workspaces: "common/libzkp/impl -> target"
- name: Setup SSH for private repos
uses: webfactory/ssh-agent@v0.9.0
with:
ssh-private-key: ${{ secrets.OPENVM_GPU_SSH_PRIVATE_KEY }}
- name: Lint
working-directory: 'common'
run: |

View File

@@ -307,6 +307,13 @@ jobs:
REPOSITORY: coordinator-api
run: |
aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
- name: Setup SSH for private repos
uses: webfactory/ssh-agent@v0.9.0
with:
ssh-private-key: ${{ secrets.OPENVM_GPU_SSH_PRIVATE_KEY }}
- name: Run custom script
run: |
./build/dockerfiles/coordinator-api/init-openvm.sh
- name: Build and push
uses: docker/build-push-action@v3
env:

View File

@@ -24,6 +24,7 @@ on:
options:
- nightly-2023-12-03
- nightly-2022-12-10
- 1.86.0
default: "nightly-2023-12-03"
PYTHON_VERSION:
description: "Python version"
@@ -47,6 +48,7 @@ on:
type: choice
options:
- 0.1.41
- 0.1.71
BASE_IMAGE:
description: "which intermediate image you want to update"
required: true

View File

@@ -9,6 +9,10 @@ RUN cargo chef prepare --recipe-path recipe.json
FROM chef as zkp-builder
COPY ./common/libzkp/impl/rust-toolchain ./
COPY --from=planner /app/recipe.json recipe.json
# run ./build/dockerfiles/coordinator-api/init-openvm.sh to get openvm-gpu
COPY ./build/dockerfiles/coordinator-api/openvm-gpu /openvm-gpu
COPY ./build/dockerfiles/coordinator-api/gitconfig /root/.gitconfig
COPY ./build/dockerfiles/coordinator-api/config.toml /root/.cargo/config.toml
RUN cargo chef cook --release --recipe-path recipe.json
COPY ./common/libzkp/impl .

View File

@@ -0,0 +1,24 @@
# openvm
# same order and features as zkvm-prover/Cargo.toml.gpu
[patch."ssh://git@github.com/scroll-tech/openvm-gpu.git"]
openvm = { path = "/openvm-gpu/crates/toolchain/openvm", default-features = false }
openvm-algebra-complex-macros = { path = "/openvm-gpu/extensions/algebra/complex-macros", default-features = false }
openvm-algebra-guest = { path = "/openvm-gpu/extensions/algebra/guest", default-features = false }
openvm-bigint-guest = { path = "/openvm-gpu/extensions/bigint/guest", default-features = false }
openvm-build = { path = "/openvm-gpu/crates/toolchain/build", default-features = false }
openvm-circuit = { path = "/openvm-gpu/crates/vm", default-features = false }
openvm-custom-insn = { path = "/openvm-gpu/crates/toolchain/custom_insn", default-features = false }
openvm-continuations = { path = "/openvm-gpu/crates/continuations", default-features = false }
openvm-ecc-guest = { path = "/openvm-gpu/extensions/ecc/guest", default-features = false }
openvm-instructions ={ path = "/openvm-gpu/crates/toolchain/instructions", default-features = false }
openvm-keccak256-guest = { path = "/openvm-gpu/extensions/keccak256/guest", default-features = false }
openvm-native-circuit = { path = "/openvm-gpu/extensions/native/circuit", default-features = false }
openvm-native-compiler = { path = "/openvm-gpu/extensions/native/compiler", default-features = false }
openvm-native-recursion = { path = "/openvm-gpu/extensions/native/recursion", default-features = false }
openvm-native-transpiler = { path = "/openvm-gpu/extensions/native/transpiler", default-features = false }
openvm-pairing-guest = { path = "/openvm-gpu/extensions/pairing/guest", default-features = false }
openvm-rv32im-guest = { path = "/openvm-gpu/extensions/rv32im/guest", default-features = false }
openvm-rv32im-transpiler = { path = "/openvm-gpu/extensions/rv32im/transpiler", default-features = false }
openvm-sdk = { path = "/openvm-gpu/crates/sdk", default-features = false, features = ["parallel", "bench-metrics"] }
openvm-sha256-guest = { path = "/openvm-gpu/extensions/sha256/guest", default-features = false }
openvm-transpiler = { path = "/openvm-gpu/crates/toolchain/transpiler", default-features = false }

View File

@@ -0,0 +1,2 @@
[url "https://github.com/"]
insteadOf = ssh://git@github.com/

View File

@@ -0,0 +1,12 @@
#!/bin/bash
set -uex
OPENVM_GPU_COMMIT=dfa10b4
DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)
# checkout openvm-gpu
if [ ! -d $DIR/openvm-gpu ]; then
git clone git@github.com:scroll-tech/openvm-gpu.git $DIR/openvm-gpu
fi
cd $DIR/openvm-gpu && git fetch && git checkout ${OPENVM_GPU_COMMIT}

View File

@@ -2852,8 +2852,8 @@ dependencies = [
[[package]]
name = "openvm"
version = "1.0.0-rc.2"
source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
version = "1.0.0"
source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
dependencies = [
"bytemuck",
"num-bigint 0.4.6",
@@ -2865,8 +2865,8 @@ dependencies = [
[[package]]
name = "openvm-algebra-circuit"
version = "1.0.0-rc.2"
source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
version = "1.0.0"
source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
dependencies = [
"derive-new 0.6.0",
"derive_more 1.0.0",
@@ -2894,7 +2894,7 @@ dependencies = [
[[package]]
name = "openvm-algebra-complex-macros"
version = "0.1.0"
source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
dependencies = [
"openvm-macros-common",
"quote",
@@ -2903,8 +2903,8 @@ dependencies = [
[[package]]
name = "openvm-algebra-guest"
version = "1.0.0-rc.2"
source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
version = "1.0.0"
source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
dependencies = [
"halo2curves-axiom",
"num-bigint 0.4.6",
@@ -2916,8 +2916,8 @@ dependencies = [
[[package]]
name = "openvm-algebra-moduli-macros"
version = "1.0.0-rc.2"
source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
version = "1.0.0"
source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
dependencies = [
"openvm-macros-common",
"quote",
@@ -2926,8 +2926,8 @@ dependencies = [
[[package]]
name = "openvm-algebra-transpiler"
version = "1.0.0-rc.2"
source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
version = "1.0.0"
source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
dependencies = [
"openvm-algebra-guest",
"openvm-instructions",
@@ -2940,8 +2940,8 @@ dependencies = [
[[package]]
name = "openvm-bigint-circuit"
version = "1.0.0-rc.2"
source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
version = "1.0.0"
source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
dependencies = [
"derive-new 0.6.0",
"derive_more 1.0.0",
@@ -2962,8 +2962,8 @@ dependencies = [
[[package]]
name = "openvm-bigint-guest"
version = "1.0.0-rc.2"
source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
version = "1.0.0"
source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
dependencies = [
"num-bigint 0.4.6",
"num-traits",
@@ -2976,8 +2976,8 @@ dependencies = [
[[package]]
name = "openvm-bigint-transpiler"
version = "1.0.0-rc.2"
source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
version = "1.0.0"
source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
dependencies = [
"openvm-bigint-guest",
"openvm-instructions",
@@ -2991,8 +2991,8 @@ dependencies = [
[[package]]
name = "openvm-build"
version = "1.0.0-rc.2"
source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
version = "1.0.0"
source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
dependencies = [
"cargo_metadata",
"eyre",
@@ -3003,8 +3003,8 @@ dependencies = [
[[package]]
name = "openvm-circuit"
version = "1.0.0-rc.2"
source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
version = "1.0.0"
source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
dependencies = [
"backtrace",
"cfg-if",
@@ -3034,8 +3034,8 @@ dependencies = [
[[package]]
name = "openvm-circuit-derive"
version = "1.0.0-rc.2"
source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
version = "1.0.0"
source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
dependencies = [
"itertools 0.14.0",
"quote",
@@ -3044,8 +3044,8 @@ dependencies = [
[[package]]
name = "openvm-circuit-primitives"
version = "1.0.0-rc.2"
source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
version = "1.0.0"
source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
dependencies = [
"derive-new 0.6.0",
"itertools 0.14.0",
@@ -3059,8 +3059,8 @@ dependencies = [
[[package]]
name = "openvm-circuit-primitives-derive"
version = "1.0.0-rc.2"
source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
version = "1.0.0"
source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
dependencies = [
"itertools 0.14.0",
"quote",
@@ -3069,8 +3069,8 @@ dependencies = [
[[package]]
name = "openvm-continuations"
version = "1.0.0-rc.2"
source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
version = "1.0.0"
source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
dependencies = [
"derivative",
"openvm-circuit",
@@ -3085,7 +3085,7 @@ dependencies = [
[[package]]
name = "openvm-custom-insn"
version = "0.1.0"
source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
dependencies = [
"proc-macro2",
"quote",
@@ -3094,8 +3094,8 @@ dependencies = [
[[package]]
name = "openvm-ecc-circuit"
version = "1.0.0-rc.2"
source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
version = "1.0.0"
source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
dependencies = [
"derive-new 0.6.0",
"derive_more 1.0.0",
@@ -3125,8 +3125,8 @@ dependencies = [
[[package]]
name = "openvm-ecc-guest"
version = "1.0.0-rc.2"
source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
version = "1.0.0"
source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
dependencies = [
"ecdsa",
"elliptic-curve",
@@ -3150,8 +3150,8 @@ dependencies = [
[[package]]
name = "openvm-ecc-sw-macros"
version = "1.0.0-rc.2"
source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
version = "1.0.0"
source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
dependencies = [
"openvm-macros-common",
"quote",
@@ -3160,8 +3160,8 @@ dependencies = [
[[package]]
name = "openvm-ecc-transpiler"
version = "1.0.0-rc.2"
source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
version = "1.0.0"
source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
dependencies = [
"openvm-ecc-guest",
"openvm-instructions",
@@ -3174,8 +3174,8 @@ dependencies = [
[[package]]
name = "openvm-instructions"
version = "1.0.0-rc.2"
source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
version = "1.0.0"
source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
dependencies = [
"backtrace",
"derive-new 0.6.0",
@@ -3191,8 +3191,8 @@ dependencies = [
[[package]]
name = "openvm-instructions-derive"
version = "1.0.0-rc.2"
source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
version = "1.0.0"
source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
dependencies = [
"quote",
"syn 2.0.98",
@@ -3200,8 +3200,8 @@ dependencies = [
[[package]]
name = "openvm-keccak256-circuit"
version = "1.0.0-rc.2"
source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
version = "1.0.0"
source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
dependencies = [
"derive-new 0.6.0",
"derive_more 1.0.0",
@@ -3226,8 +3226,8 @@ dependencies = [
[[package]]
name = "openvm-keccak256-guest"
version = "1.0.0-rc.2"
source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
version = "1.0.0"
source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
dependencies = [
"openvm-platform",
"tiny-keccak",
@@ -3235,8 +3235,8 @@ dependencies = [
[[package]]
name = "openvm-keccak256-transpiler"
version = "1.0.0-rc.2"
source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
version = "1.0.0"
source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
dependencies = [
"openvm-instructions",
"openvm-instructions-derive",
@@ -3249,16 +3249,16 @@ dependencies = [
[[package]]
name = "openvm-macros-common"
version = "1.0.0-rc.2"
source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
version = "1.0.0"
source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
dependencies = [
"syn 2.0.98",
]
[[package]]
name = "openvm-mod-circuit-builder"
version = "1.0.0-rc.2"
source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
version = "1.0.0"
source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
dependencies = [
"itertools 0.14.0",
"num-bigint 0.4.6",
@@ -3276,8 +3276,8 @@ dependencies = [
[[package]]
name = "openvm-native-circuit"
version = "1.0.0-rc.2"
source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
version = "1.0.0"
source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
dependencies = [
"derive-new 0.6.0",
"derive_more 1.0.0",
@@ -3303,8 +3303,8 @@ dependencies = [
[[package]]
name = "openvm-native-compiler"
version = "1.0.0-rc.2"
source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
version = "1.0.0"
source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
dependencies = [
"backtrace",
"itertools 0.14.0",
@@ -3327,8 +3327,8 @@ dependencies = [
[[package]]
name = "openvm-native-compiler-derive"
version = "1.0.0-rc.2"
source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
version = "1.0.0"
source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
dependencies = [
"quote",
"syn 2.0.98",
@@ -3336,8 +3336,8 @@ dependencies = [
[[package]]
name = "openvm-native-recursion"
version = "1.0.0-rc.2"
source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
version = "1.0.0"
source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
dependencies = [
"cfg-if",
"itertools 0.14.0",
@@ -3364,8 +3364,8 @@ dependencies = [
[[package]]
name = "openvm-pairing-circuit"
version = "1.0.0-rc.2"
source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
version = "1.0.0"
source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
dependencies = [
"derive-new 0.6.0",
"derive_more 1.0.0",
@@ -3394,8 +3394,8 @@ dependencies = [
[[package]]
name = "openvm-pairing-guest"
version = "1.0.0-rc.2"
source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
version = "1.0.0"
source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
dependencies = [
"group 0.13.0",
"halo2curves-axiom",
@@ -3420,8 +3420,8 @@ dependencies = [
[[package]]
name = "openvm-pairing-transpiler"
version = "1.0.0-rc.2"
source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
version = "1.0.0"
source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
dependencies = [
"openvm-instructions",
"openvm-instructions-derive",
@@ -3434,8 +3434,8 @@ dependencies = [
[[package]]
name = "openvm-platform"
version = "1.0.0-rc.2"
source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
version = "1.0.0"
source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
dependencies = [
"getrandom 0.2.15",
"libm",
@@ -3445,8 +3445,8 @@ dependencies = [
[[package]]
name = "openvm-poseidon2-air"
version = "1.0.0-rc.2"
source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
version = "1.0.0"
source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
dependencies = [
"derivative",
"lazy_static",
@@ -3462,8 +3462,8 @@ dependencies = [
[[package]]
name = "openvm-rv32-adapters"
version = "1.0.0-rc.2"
source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
version = "1.0.0"
source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
dependencies = [
"derive-new 0.6.0",
"itertools 0.14.0",
@@ -3482,8 +3482,8 @@ dependencies = [
[[package]]
name = "openvm-rv32im-circuit"
version = "1.0.0-rc.2"
source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
version = "1.0.0"
source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
dependencies = [
"derive-new 0.6.0",
"derive_more 1.0.0",
@@ -3505,8 +3505,8 @@ dependencies = [
[[package]]
name = "openvm-rv32im-guest"
version = "1.0.0-rc.2"
source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
version = "1.0.0"
source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
dependencies = [
"openvm-custom-insn",
"strum_macros 0.26.4",
@@ -3514,8 +3514,8 @@ dependencies = [
[[package]]
name = "openvm-rv32im-transpiler"
version = "1.0.0-rc.2"
source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
version = "1.0.0"
source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
dependencies = [
"openvm-instructions",
"openvm-instructions-derive",
@@ -3530,8 +3530,8 @@ dependencies = [
[[package]]
name = "openvm-sdk"
version = "1.0.0-rc.2"
source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
version = "1.0.0"
source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
dependencies = [
"async-trait",
"bitcode",
@@ -3577,8 +3577,8 @@ dependencies = [
[[package]]
name = "openvm-sha256-air"
version = "1.0.0-rc.2"
source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
version = "1.0.0"
source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
dependencies = [
"openvm-circuit-primitives",
"openvm-stark-backend",
@@ -3588,8 +3588,8 @@ dependencies = [
[[package]]
name = "openvm-sha256-circuit"
version = "1.0.0-rc.2"
source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
version = "1.0.0"
source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
dependencies = [
"derive-new 0.6.0",
"derive_more 1.0.0",
@@ -3611,8 +3611,8 @@ dependencies = [
[[package]]
name = "openvm-sha256-guest"
version = "1.0.0-rc.2"
source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
version = "1.0.0"
source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
dependencies = [
"openvm-platform",
"sha2",
@@ -3620,8 +3620,8 @@ dependencies = [
[[package]]
name = "openvm-sha256-transpiler"
version = "1.0.0-rc.2"
source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
version = "1.0.0"
source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
dependencies = [
"openvm-instructions",
"openvm-instructions-derive",
@@ -3634,8 +3634,8 @@ dependencies = [
[[package]]
name = "openvm-stark-backend"
version = "1.0.0-rc.2"
source = "git+https://github.com/openvm-org/stark-backend.git?rev=b051e8978da9c829a76b262abf4a9736c8d1681e#b051e8978da9c829a76b262abf4a9736c8d1681e"
version = "1.0.0"
source = "git+https://github.com/openvm-org/stark-backend.git?tag=v1.0.0#884f8e6aabf72bde00dc51f1f1121277bff73b1e"
dependencies = [
"bitcode",
"cfg-if",
@@ -3660,8 +3660,8 @@ dependencies = [
[[package]]
name = "openvm-stark-sdk"
version = "1.0.0-rc.2"
source = "git+https://github.com/openvm-org/stark-backend.git?rev=b051e8978da9c829a76b262abf4a9736c8d1681e#b051e8978da9c829a76b262abf4a9736c8d1681e"
version = "1.0.0"
source = "git+https://github.com/openvm-org/stark-backend.git?tag=v1.0.0#884f8e6aabf72bde00dc51f1f1121277bff73b1e"
dependencies = [
"derivative",
"derive_more 0.99.19",
@@ -3695,8 +3695,8 @@ dependencies = [
[[package]]
name = "openvm-transpiler"
version = "1.0.0-rc.2"
source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"
version = "1.0.0"
source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"
dependencies = [
"elf",
"eyre",
@@ -3737,7 +3737,7 @@ dependencies = [
[[package]]
name = "p3-air"
version = "0.1.0"
source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"
source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"
dependencies = [
"p3-field",
"p3-matrix",
@@ -3746,7 +3746,7 @@ dependencies = [
[[package]]
name = "p3-baby-bear"
version = "0.1.0"
source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"
source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"
dependencies = [
"p3-field",
"p3-mds",
@@ -3760,7 +3760,7 @@ dependencies = [
[[package]]
name = "p3-blake3"
version = "0.1.0"
source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"
source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"
dependencies = [
"blake3",
"p3-symmetric",
@@ -3770,7 +3770,7 @@ dependencies = [
[[package]]
name = "p3-bn254-fr"
version = "0.1.0"
source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"
source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"
dependencies = [
"ff 0.13.0",
"halo2curves",
@@ -3785,7 +3785,7 @@ dependencies = [
[[package]]
name = "p3-challenger"
version = "0.1.0"
source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"
source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"
dependencies = [
"p3-field",
"p3-maybe-rayon",
@@ -3797,7 +3797,7 @@ dependencies = [
[[package]]
name = "p3-commit"
version = "0.1.0"
source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"
source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"
dependencies = [
"itertools 0.14.0",
"p3-challenger",
@@ -3811,7 +3811,7 @@ dependencies = [
[[package]]
name = "p3-dft"
version = "0.1.0"
source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"
source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"
dependencies = [
"itertools 0.14.0",
"p3-field",
@@ -3824,7 +3824,7 @@ dependencies = [
[[package]]
name = "p3-field"
version = "0.1.0"
source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"
source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"
dependencies = [
"itertools 0.14.0",
"num-bigint 0.4.6",
@@ -3841,7 +3841,7 @@ dependencies = [
[[package]]
name = "p3-fri"
version = "0.1.0"
source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"
source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"
dependencies = [
"itertools 0.14.0",
"p3-challenger",
@@ -3860,7 +3860,7 @@ dependencies = [
[[package]]
name = "p3-goldilocks"
version = "0.1.0"
source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"
source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"
dependencies = [
"num-bigint 0.4.6",
"p3-dft",
@@ -3877,7 +3877,7 @@ dependencies = [
[[package]]
name = "p3-interpolation"
version = "0.1.0"
source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"
source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"
dependencies = [
"p3-field",
"p3-matrix",
@@ -3888,7 +3888,7 @@ dependencies = [
[[package]]
name = "p3-keccak"
version = "0.1.0"
source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"
source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"
dependencies = [
"itertools 0.14.0",
"p3-field",
@@ -3900,7 +3900,7 @@ dependencies = [
[[package]]
name = "p3-keccak-air"
version = "0.1.0"
source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"
source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"
dependencies = [
"p3-air",
"p3-field",
@@ -3914,7 +3914,7 @@ dependencies = [
[[package]]
name = "p3-matrix"
version = "0.1.0"
source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"
source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"
dependencies = [
"itertools 0.14.0",
"p3-field",
@@ -3929,7 +3929,7 @@ dependencies = [
[[package]]
name = "p3-maybe-rayon"
version = "0.1.0"
source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"
source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"
dependencies = [
"rayon",
]
@@ -3937,7 +3937,7 @@ dependencies = [
[[package]]
name = "p3-mds"
version = "0.1.0"
source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"
source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"
dependencies = [
"itertools 0.14.0",
"p3-dft",
@@ -3951,7 +3951,7 @@ dependencies = [
[[package]]
name = "p3-merkle-tree"
version = "0.1.0"
source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"
source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"
dependencies = [
"itertools 0.14.0",
"p3-commit",
@@ -3968,7 +3968,7 @@ dependencies = [
[[package]]
name = "p3-monty-31"
version = "0.1.0"
source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"
source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"
dependencies = [
"itertools 0.14.0",
"num-bigint 0.4.6",
@@ -3989,7 +3989,7 @@ dependencies = [
[[package]]
name = "p3-poseidon"
version = "0.1.0"
source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"
source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"
dependencies = [
"p3-field",
"p3-mds",
@@ -4000,7 +4000,7 @@ dependencies = [
[[package]]
name = "p3-poseidon2"
version = "0.1.0"
source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"
source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"
dependencies = [
"gcd",
"p3-field",
@@ -4012,7 +4012,7 @@ dependencies = [
[[package]]
name = "p3-poseidon2-air"
version = "0.1.0"
source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"
source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"
dependencies = [
"p3-air",
"p3-field",
@@ -4028,7 +4028,7 @@ dependencies = [
[[package]]
name = "p3-symmetric"
version = "0.1.0"
source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"
source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"
dependencies = [
"itertools 0.14.0",
"p3-field",
@@ -4038,7 +4038,7 @@ dependencies = [
[[package]]
name = "p3-uni-stark"
version = "0.1.0"
source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"
source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"
dependencies = [
"itertools 0.14.0",
"p3-air",
@@ -4056,7 +4056,7 @@ dependencies = [
[[package]]
name = "p3-util"
version = "0.1.0"
source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"
source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"
dependencies = [
"serde",
]
@@ -5674,7 +5674,7 @@ dependencies = [
[[package]]
name = "scroll-zkvm-circuit-input-types"
version = "0.2.0"
source = "git+https://github.com/scroll-tech/zkvm-prover.git?tag=v0.2.0#5854f8179d14f2afa489a499290e0ff6a953b314"
source = "git+https://github.com/scroll-tech/zkvm-prover.git?tag=v0.3.0#fcc09d1517e9d254284820fe66b087331e4b1bf4"
dependencies = [
"alloy-primitives",
"alloy-serde 0.8.3",
@@ -5696,8 +5696,8 @@ dependencies = [
[[package]]
name = "scroll-zkvm-prover"
version = "0.2.0"
source = "git+https://github.com/scroll-tech/zkvm-prover.git?tag=v0.2.0#5854f8179d14f2afa489a499290e0ff6a953b314"
version = "0.3.0"
source = "git+https://github.com/scroll-tech/zkvm-prover.git?tag=v0.3.0#fcc09d1517e9d254284820fe66b087331e4b1bf4"
dependencies = [
"alloy-primitives",
"base64 0.22.1",
@@ -5732,8 +5732,8 @@ dependencies = [
[[package]]
name = "scroll-zkvm-verifier"
version = "0.2.0"
source = "git+https://github.com/scroll-tech/zkvm-prover.git?tag=v0.2.0#5854f8179d14f2afa489a499290e0ff6a953b314"
version = "0.3.0"
source = "git+https://github.com/scroll-tech/zkvm-prover.git?tag=v0.3.0#fcc09d1517e9d254284820fe66b087331e4b1bf4"
dependencies = [
"bincode",
"eyre",

View File

@@ -14,8 +14,8 @@ ruint = { git = "https://github.com/scroll-tech/uint.git", branch = "v1.12.3" }
tiny-keccak = { git = "https://github.com/scroll-tech/tiny-keccak", branch = "scroll-patch-v2.0.2-openvm-v1.0.0-rc.1" }
[dependencies]
euclid_prover = { git = "https://github.com/scroll-tech/zkvm-prover.git", tag = "v0.2.0", package = "scroll-zkvm-prover" }
euclid_verifier = { git = "https://github.com/scroll-tech/zkvm-prover.git", tag = "v0.2.0", package = "scroll-zkvm-verifier" }
euclid_prover = { git = "https://github.com/scroll-tech/zkvm-prover.git", tag = "v0.3.0", package = "scroll-zkvm-prover" }
euclid_verifier = { git = "https://github.com/scroll-tech/zkvm-prover.git", tag = "v0.3.0", package = "scroll-zkvm-verifier" }
base64 = "0.13.0"
env_logger = "0.9.0"

View File

@@ -276,8 +276,8 @@ const (
SenderTypeFinalizeBatch
// SenderTypeL1GasOracle indicates a sender from L2 responsible for updating L1 gas prices.
SenderTypeL1GasOracle
// SenderTypeL2GasOracle indicates a sender from L1 responsible for updating L2 gas prices.
SenderTypeL2GasOracle
// SenderTypeL2GasOracleDeprecated indicates a sender from L1 responsible for updating L2 gas prices, which is deprecated.
SenderTypeL2GasOracleDeprecated
)
// String returns a string representation of the SenderType.
@@ -289,8 +289,8 @@ func (t SenderType) String() string {
return "SenderTypeFinalizeBatch"
case SenderTypeL1GasOracle:
return "SenderTypeL1GasOracle"
case SenderTypeL2GasOracle:
return "SenderTypeL2GasOracle"
case SenderTypeL2GasOracleDeprecated:
return "SenderTypeL2GasOracleDeprecated"
default:
return fmt.Sprintf("Unknown SenderType (%d)", int32(t))
}

View File

@@ -173,9 +173,9 @@ func TestSenderType(t *testing.T) {
"SenderTypeL1GasOracle",
},
{
"SenderTypeL2GasOracle",
SenderTypeL2GasOracle,
"SenderTypeL2GasOracle",
"SenderTypeL2GasOracleDeprecated",
SenderTypeL2GasOracleDeprecated,
"SenderTypeL2GasOracleDeprecated",
},
{
"Invalid Value",

View File

@@ -20,9 +20,12 @@ var (
}
// RollupRelayerFlags contains flags only used in rollup-relayer
RollupRelayerFlags = []cli.Flag{
&ImportGenesisFlag,
&MinCodecVersionFlag,
}
// ProposerToolFlags contains flags only used in proposer tool
ProposerToolFlags = []cli.Flag{
&StartL2BlockFlag,
}
// ConfigFileFlag load json type config file.
ConfigFileFlag = cli.StringFlag{
Name: "config",
@@ -73,12 +76,6 @@ var (
Category: "METRICS",
Value: 6060,
}
// ImportGenesisFlag import genesis batch during startup
ImportGenesisFlag = cli.BoolFlag{
Name: "import-genesis",
Usage: "Import genesis batch into L1 contract during startup",
Value: false,
}
// ServicePortFlag is the port the service will listen on
ServicePortFlag = cli.IntFlag{
Name: "service.port",
@@ -97,4 +94,10 @@ var (
Usage: "Minimum required codec version for the chunk/batch/bundle proposers",
Required: true,
}
// StartL2BlockFlag indicates the start L2 block number for proposer tool
StartL2BlockFlag = cli.Uint64Flag{
Name: "start-l2-block",
Usage: "Start L2 block number for proposer tool",
Value: 0,
}
)

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "v4.5.0"
var tag = "v4.5.9"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {

View File

@@ -95,13 +95,13 @@ func (c *CoordinatorApp) MockConfig(store bool) error {
ParamsPath: "",
AssetsPath: "",
ForkName: "darwin",
MinProverVersion: "v4.2.0",
MinProverVersion: "v4.4.57",
},
HighVersionCircuit: &coordinatorConfig.CircuitConfig{
ParamsPath: "",
AssetsPath: "",
ForkName: "darwinV2",
MinProverVersion: "v4.3.0",
MinProverVersion: "v4.4.89",
},
},
BatchCollectionTimeSec: 60,

View File

@@ -75,9 +75,12 @@ func (l *LoginLogic) Check(login *types.LoginParameter) error {
return errors.New("auth message verify failure")
}
if !version.CheckScrollRepoVersion(login.Message.ProverVersion, l.cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion) {
// FIXME: for backward compatibility, set prover version as darwin prover version,
// change v4.4.56 to l.cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion after Euclid upgrade, including the log.
// hardcode the prover version because l.cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion is used in another check and should be set as v4.4.89 for darwinV2 provers.
if !version.CheckScrollRepoVersion(login.Message.ProverVersion, "v4.4.56") {
return fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s",
l.cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion, login.Message.ProverVersion)
"v4.4.56", login.Message.ProverVersion)
}
if len(login.Message.ProverTypes) > 0 {
@@ -139,6 +142,12 @@ func (l *LoginLogic) ProverHardForkName(login *types.LoginParameter) (string, er
}
proverVersion := proverVersionSplits[0]
// allowing darwin provers to login, because darwin provers can prove darwinV2 chunk tasks
if proverVersion == "v4.4.56" {
return "darwin", nil
}
if hardForkNames, ok := l.proverVersionHardForkMap[proverVersion]; ok {
return strings.Join(hardForkNames, ","), nil
}

View File

@@ -121,6 +121,11 @@ func (b *BaseProverTask) hardForkSanityCheck(ctx *gin.Context, taskCtx *proverTa
return "", getHardForkErr
}
// for backward compatibility, darwin chunk prover can still prove darwinV2 chunk tasks
if taskCtx.taskType == message.ProofTypeChunk && hardForkName == "darwinV2" && strings.HasPrefix(taskCtx.ProverVersion, "v4.4.56") {
return hardForkName, nil
}
if _, ok := taskCtx.HardForkNames[hardForkName]; !ok {
return "", fmt.Errorf("to be assigned prover task's hard-fork name is not the same as prover, proverName: %s, proverVersion: %s, proverSupportHardForkNames: %s, taskHardForkName: %v", taskCtx.ProverName, taskCtx.ProverVersion, taskCtx.HardForkNames, hardForkName)
}

View File

@@ -89,13 +89,13 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
ParamsPath: "",
AssetsPath: "",
ForkName: "homestead",
MinProverVersion: "v4.2.0",
MinProverVersion: "v4.4.57",
},
HighVersionCircuit: &config.CircuitConfig{
ParamsPath: "",
AssetsPath: "",
ForkName: "bernoulli",
MinProverVersion: "v4.3.0",
MinProverVersion: "v4.4.89",
},
},
BatchCollectionTimeSec: 10,
@@ -142,7 +142,7 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
func setEnv(t *testing.T) {
var err error
version.Version = "v4.2.0"
version.Version = "v4.4.57"
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
glogger.Verbosity(log.LvlInfo)
@@ -285,14 +285,12 @@ func testOutdatedProverVersion(t *testing.T) {
batchProver := newMockProver(t, "prover_batch_test", coordinatorURL, message.ProofTypeBatch, "v1.999.999")
assert.True(t, chunkProver.healthCheckSuccess(t))
expectedErr := fmt.Errorf("check the login parameter failure: incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s",
conf.ProverManager.Verifier.LowVersionCircuit.MinProverVersion, chunkProver.proverVersion)
expectedErr := fmt.Errorf("check the login parameter failure: incompatible prover version. please upgrade your prover, minimum allowed version: v4.4.56, actual version: %s", chunkProver.proverVersion)
code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk)
assert.Equal(t, types.ErrJWTCommonErr, code)
assert.Equal(t, expectedErr, errors.New(errMsg))
expectedErr = fmt.Errorf("check the login parameter failure: incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s",
conf.ProverManager.Verifier.LowVersionCircuit.MinProverVersion, batchProver.proverVersion)
expectedErr = fmt.Errorf("check the login parameter failure: incompatible prover version. please upgrade your prover, minimum allowed version: v4.4.56, actual version: %s", batchProver.proverVersion)
code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch)
assert.Equal(t, types.ErrJWTCommonErr, code)
assert.Equal(t, expectedErr, errors.New(errMsg))

View File

@@ -33,3 +33,45 @@ make rollup_bins
./build/bin/gas_oracle --config ./conf/config.json
./build/bin/rollup_relayer --config ./conf/config.json
```
## Proposer Tool
The Proposer Tool replays historical blocks with custom configurations (e.g., future hardfork configs, custom chunk/batch/bundle proposer configs) to generate chunks/batches/bundles, which helps test parameter changes before a protocol upgrade.
You can:
1. Enable different hardforks in the genesis configuration.
2. Set custom chunk-proposer, batch-proposer, and bundle-proposer parameters.
3. Analyze resulting metrics (blob size, block count, transaction count, gas usage).
## How to run the proposer tool?
### Set the configs
1. Set genesis config to enable desired hardforks in [`proposer-tool-genesis.json`](./proposer-tool-genesis.json).
2. Set proposer config in [`proposer-tool-config.json`](./proposer-tool-config.json) for data analysis.
3. Set `start-l2-block` in the launch command of the proposer-tool in [`docker-compose-proposer-tool.yml`](./docker-compose-proposer-tool.yml) to the block number you want to start from (see the excerpt below). The default is `0`, which means starting from the genesis block.
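For reference, the default launch command of the `proposer-tool` service in [`docker-compose-proposer-tool.yml`](./docker-compose-proposer-tool.yml) looks like this; adjust `--start-l2-block` (and the other flags) as needed:
```
command: [
  "--config", "/app/conf/proposer-tool-config.json",
  "--genesis", "/app/conf/proposer-tool-genesis.json",
  "--min-codec-version", "4",
  "--start-l2-block", "10000",
  "--log.debug", "--verbosity", "3"
]
```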
### Start the proposer tool using docker-compose
Prerequisite: an RPC URL of an archive L2 node. The default URL in [`proposer-tool-config.json`](./proposer-tool-config.json) is `https://rpc.scroll.io`.
```
cd rollup
DOCKER_BUILDKIT=1 docker-compose -f docker-compose-proposer-tool.yml up -d
```
> Note: Port 5432 of the database is mapped to the host machine. You can use `psql` or any other DB client to connect to the database.
> The DSN for the database is `postgres://postgres:postgres@db:5432/scroll?sslmode=disable`.
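> For example, to connect from the host machine and list the tables (since you connect through the published port, replace the `db` host in the DSN with `localhost`):
> ```
> psql "postgres://postgres:postgres@localhost:5432/scroll?sslmode=disable" -c '\dt'
> ```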
### Reset env
```
docker-compose -f docker-compose-proposer-tool.yml down -v
```
If you need to rebuild the images, remove the old ones first:
```
docker images | grep rollup | awk '{print $3}' | xargs docker rmi -f
```

View File

@@ -10,7 +10,6 @@ import (
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/params"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/urfave/cli/v2"
@@ -72,22 +71,12 @@ func action(ctx *cli.Context) error {
log.Crit("failed to connect l1 geth", "config file", cfgFile, "error", err)
}
// Init l2geth connection
l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
if err != nil {
log.Crit("failed to connect l2 geth", "config file", cfgFile, "error", err)
}
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, db, registry)
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig, relayer.ServiceTypeL1GasOracle, registry)
if err != nil {
log.Crit("failed to create new l1 relayer", "config file", cfgFile, "error", err)
}
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, &params.ChainConfig{}, false /* initGenesis */, relayer.ServiceTypeL2GasOracle, registry)
if err != nil {
log.Crit("failed to create new l2 relayer", "config file", cfgFile, "error", err)
}
// Start l1 watcher process
go utils.LoopWithContext(subCtx, 10*time.Second, func(ctx context.Context) {
// Fetch the latest block number to decrease the delay when fetching gas prices
@@ -106,7 +95,6 @@ func action(ctx *cli.Context) error {
// Start l1relayer process
go utils.Loop(subCtx, 10*time.Second, l1relayer.ProcessGasPriceOracle)
go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessGasPriceOracle)
// Finish start all message relayer functions
log.Info("Start gas-oracle successfully", "version", version.Version)

View File

@@ -0,0 +1,93 @@
package app
import (
"context"
"fmt"
"os"
"os/signal"
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/controller/watcher"
)
var app *cli.App
func init() {
// Set up proposer-tool app info.
app = cli.NewApp()
app.Action = action
app.Name = "proposer-tool"
app.Usage = "The Scroll Proposer Tool"
app.Version = version.Version
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Flags = append(app.Flags, utils.RollupRelayerFlags...)
app.Flags = append(app.Flags, utils.ProposerToolFlags...)
app.Commands = []*cli.Command{}
app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx)
}
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfg, err := config.NewConfigForReplay(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
subCtx, cancel := context.WithCancel(ctx.Context)
startL2BlockHeight := ctx.Uint64(utils.StartL2BlockFlag.Name)
genesisPath := ctx.String(utils.Genesis.Name)
genesis, err := utils.ReadGenesis(genesisPath)
if err != nil {
log.Crit("failed to read genesis", "genesis file", genesisPath, "error", err)
}
minCodecVersion := encoding.CodecVersion(ctx.Uint(utils.MinCodecVersionFlag.Name))
// sanity check config
if cfg.L2Config.BatchProposerConfig.MaxChunksPerBatch <= 0 {
log.Crit("cfg.L2Config.BatchProposerConfig.MaxChunksPerBatch must be greater than 0")
}
if cfg.L2Config.ChunkProposerConfig.MaxL2GasPerChunk <= 0 {
log.Crit("cfg.L2Config.ChunkProposerConfig.MaxL2GasPerChunk must be greater than 0")
}
proposerTool, err := watcher.NewProposerTool(subCtx, cancel, cfg, startL2BlockHeight, minCodecVersion, genesis.Config)
if err != nil {
log.Crit("failed to create proposer tool", "startL2BlockHeight", startL2BlockHeight, "minCodecVersion", minCodecVersion, "error", err)
}
proposerTool.Start()
log.Info("Start proposer-tool successfully", "version", version.Version)
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
cancel()
proposerTool.Stop()
return nil
}
// Run proposer tool cmd instance.
func Run() {
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}

View File

@@ -0,0 +1,7 @@
package main
import "scroll-tech/rollup/cmd/proposer_tool/app"
func main() {
app.Run()
}

View File

@@ -79,8 +79,6 @@ func action(ctx *cli.Context) error {
log.Crit("failed to read genesis", "genesis file", genesisPath, "error", err)
}
initGenesis := ctx.Bool(utils.ImportGenesisFlag.Name)
// sanity check config
if cfg.L2Config.RelayerConfig.BatchSubmission == nil {
log.Crit("cfg.L2Config.RelayerConfig.BatchSubmission must not be nil")
@@ -98,7 +96,7 @@ func action(ctx *cli.Context) error {
log.Crit("cfg.L2Config.ChunkProposerConfig.MaxL2GasPerChunk must be greater than 0")
}
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, genesis.Config, initGenesis, relayer.ServiceTypeL2RollupRelayer, registry)
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, genesis.Config, relayer.ServiceTypeL2RollupRelayer, registry)
if err != nil {
log.Crit("failed to create l2 relayer", "config file", cfgFile, "error", err)
}

View File

@@ -0,0 +1,40 @@
version: '3'
services:
db:
image: postgres:14
environment:
- POSTGRES_USER=postgres
- POSTGRES_PASSWORD=postgres
- POSTGRES_DB=scroll
ports:
- "5432:5432"
volumes:
- postgres_data:/var/lib/postgresql/data
healthcheck:
test: ["CMD-SHELL", "pg_isready -U postgres"]
interval: 5s
timeout: 5s
retries: 5
proposer-tool:
build:
context: ..
dockerfile: ./rollup/proposer_tool.Dockerfile
depends_on:
db:
condition: service_healthy
command: [
"--config", "/app/conf/proposer-tool-config.json",
"--genesis", "/app/conf/proposer-tool-genesis.json",
"--min-codec-version", "4",
"--start-l2-block", "10000",
"--log.debug", "--verbosity", "3"
]
volumes:
- ./proposer-tool-config.json:/app/conf/proposer-tool-config.json
- ./proposer-tool-genesis.json:/app/conf/proposer-tool-genesis.json
restart: unless-stopped
volumes:
postgres_data:

View File

@@ -1,7 +1,10 @@
package config
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"reflect"
"strings"
@@ -20,6 +23,11 @@ type Config struct {
DBConfig *database.Config `json:"db_config"`
}
type ConfigForReplay struct {
Config
DBConfigForReplay *database.Config `json:"db_config_for_replay"`
}
// NewConfig returns a new instance of Config.
func NewConfig(file string) (*Config, error) {
v := viper.New()
@@ -87,3 +95,19 @@ func NewConfig(file string) (*Config, error) {
return cfg, nil
}
// NewConfigForReplay returns a new instance of ConfigForReplay.
func NewConfigForReplay(file string) (*ConfigForReplay, error) {
buf, err := os.ReadFile(filepath.Clean(file))
if err != nil {
return nil, err
}
cfg := &ConfigForReplay{}
err = json.Unmarshal(buf, cfg)
if err != nil {
return nil, err
}
return cfg, nil
}

View File

@@ -25,6 +25,6 @@ const (
ServiceTypeL2RollupRelayer
// ServiceTypeL1GasOracle indicates the service is a Layer 1 gas oracle.
ServiceTypeL1GasOracle
// ServiceTypeL2GasOracle indicates the service is a Layer 2 gas oracle.
ServiceTypeL2GasOracle
// ServiceTypeL2GasOracleDeprecated indicates the service is a Layer 2 gas oracle, which is deprecated.
ServiceTypeL2GasOracleDeprecated
)

View File

@@ -167,6 +167,7 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
if r.lastBaseFee == r.cfg.GasOracleConfig.L1BaseFeeDefault && r.lastBlobBaseFee == r.cfg.GasOracleConfig.L1BlobBaseFeeDefault {
return
}
log.Warn("The committing batch has been stuck for a long time, it's likely that the L1 gas fee spiked, set fees to default values", "currentBaseFee", baseFee, "currentBlobBaseFee", blobBaseFee, "threshold (min)", r.cfg.GasOracleConfig.L1BlobBaseFeeThreshold, "defaultBaseFee", r.cfg.GasOracleConfig.L1BaseFeeDefault, "defaultBlobBaseFee", r.cfg.GasOracleConfig.L1BlobBaseFeeDefault)
baseFee = r.cfg.GasOracleConfig.L1BaseFeeDefault
blobBaseFee = r.cfg.GasOracleConfig.L1BlobBaseFeeDefault
} else if err != nil {
@@ -250,7 +251,11 @@ func (r *Layer1Relayer) shouldUpdateGasOracle(baseFee uint64, blobBaseFee uint64
return true
}
expectedBaseFeeDelta := r.lastBaseFee*r.gasPriceDiff/gasPriceDiffPrecision + 1
expectedBaseFeeDelta := r.lastBaseFee * r.gasPriceDiff / gasPriceDiffPrecision
// Allowing a minimum of 0 wei if the gas price diff config is 0, this will be used to let the gas oracle send transactions continuously.
if r.gasPriceDiff > 0 {
expectedBaseFeeDelta += 1
}
if baseFee >= r.minGasPrice && math.Abs(float64(baseFee)-float64(r.lastBaseFee)) >= float64(expectedBaseFeeDelta) {
return true
}
@@ -278,5 +283,7 @@ func (r *Layer1Relayer) commitBatchReachTimeout() (bool, error) {
}
// len(batches) == 0 probably shouldn't ever happen, but need to check this
// Also, we should check if it's a genesis batch. If so, skip the timeout check.
return len(batches) == 0 || (batches[0].Index != 0 && utils.NowUTC().Sub(*batches[0].CommittedAt) > time.Duration(r.cfg.GasOracleConfig.CheckCommittedBatchesWindowMinutes)*time.Minute), nil
// If finalizing/finalized status is updated before committed status, skip the timeout check of this round.
// Because batches[0].CommittedAt is nil in this case, this will only continue for a short time window.
return len(batches) == 0 || (batches[0].Index != 0 && batches[0].CommittedAt != nil && utils.NowUTC().Sub(*batches[0].CommittedAt) > time.Duration(r.cfg.GasOracleConfig.CheckCommittedBatchesWindowMinutes)*time.Minute), nil
}

View File

@@ -4,7 +4,6 @@ import (
"context"
"errors"
"fmt"
"math"
"math/big"
"sort"
"strings"
@@ -54,12 +53,7 @@ type Layer2Relayer struct {
finalizeSender *sender.Sender
l1RollupABI *abi.ABI
gasOracleSender *sender.Sender
l2GasOracleABI *abi.ABI
lastGasPrice uint64
minGasPrice uint64
gasPriceDiff uint64
l2GasOracleABI *abi.ABI
// Used to get batch status from chain_monitor api.
chainMonitorClient *resty.Client
@@ -70,22 +64,10 @@ type Layer2Relayer struct {
}
// NewLayer2Relayer will return a new instance of Layer2RelayerClient
func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.DB, cfg *config.RelayerConfig, chainCfg *params.ChainConfig, initGenesis bool, serviceType ServiceType, reg prometheus.Registerer) (*Layer2Relayer, error) {
var gasOracleSender, commitSender, finalizeSender *sender.Sender
var err error
func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.DB, cfg *config.RelayerConfig, chainCfg *params.ChainConfig, serviceType ServiceType, reg prometheus.Registerer) (*Layer2Relayer, error) {
var commitSender, finalizeSender *sender.Sender
switch serviceType {
case ServiceTypeL2GasOracle:
gasOracleSender, err = sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderSignerConfig, "l2_relayer", "gas_oracle_sender", types.SenderTypeL2GasOracle, db, reg)
if err != nil {
return nil, fmt.Errorf("new gas oracle sender failed, err: %w", err)
}
// Ensure test features aren't enabled on the ethereum mainnet.
if gasOracleSender.GetChainID().Cmp(big.NewInt(1)) == 0 && cfg.EnableTestEnvBypassFeatures {
return nil, errors.New("cannot enable test env features in mainnet")
}
case ServiceTypeL2RollupRelayer:
commitSenderAddr, err := addrFromSignerConfig(cfg.CommitSenderSignerConfig)
if err != nil {
@@ -118,16 +100,6 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
return nil, fmt.Errorf("invalid service type for l2_relayer: %v", serviceType)
}
var minGasPrice uint64
var gasPriceDiff uint64
if cfg.GasOracleConfig != nil {
minGasPrice = cfg.GasOracleConfig.MinGasPrice
gasPriceDiff = cfg.GasOracleConfig.GasPriceDiff
} else {
minGasPrice = 0
gasPriceDiff = defaultGasPriceDiff
}
layer2Relayer := &Layer2Relayer{
ctx: ctx,
db: db,
@@ -143,11 +115,7 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
finalizeSender: finalizeSender,
l1RollupABI: bridgeAbi.ScrollChainABI,
gasOracleSender: gasOracleSender,
l2GasOracleABI: bridgeAbi.L2GasPriceOracleABI,
minGasPrice: minGasPrice,
gasPriceDiff: gasPriceDiff,
l2GasOracleABI: bridgeAbi.L2GasPriceOracleABI,
cfg: cfg,
chainCfg: chainCfg,
@@ -161,16 +129,12 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
}
// Initialize genesis before we do anything else
if initGenesis {
if err := layer2Relayer.initializeGenesis(); err != nil {
return nil, fmt.Errorf("failed to initialize and commit genesis batch, err: %v", err)
}
if err := layer2Relayer.initializeGenesis(); err != nil {
return nil, fmt.Errorf("failed to initialize and commit genesis batch, err: %v", err)
}
layer2Relayer.metrics = initL2RelayerMetrics(reg)
switch serviceType {
case ServiceTypeL2GasOracle:
go layer2Relayer.handleL2GasOracleConfirmLoop(ctx)
case ServiceTypeL2RollupRelayer:
go layer2Relayer.handleL2RollupRelayerConfirmLoop(ctx)
default:
@@ -301,80 +265,6 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
}
}
// ProcessGasPriceOracle imports gas price to layer1
func (r *Layer2Relayer) ProcessGasPriceOracle() {
r.metrics.rollupL2RelayerGasPriceOraclerRunTotal.Inc()
batch, err := r.batchOrm.GetLatestBatch(r.ctx)
if err != nil {
log.Error("Failed to GetLatestBatch", "err", err)
return
}
if types.GasOracleStatus(batch.OracleStatus) == types.GasOraclePending {
suggestGasPrice, err := r.l2Client.SuggestGasPrice(r.ctx)
if err != nil {
log.Error("Failed to fetch SuggestGasPrice from l2geth", "err", err)
return
}
suggestGasPriceUint64 := uint64(suggestGasPrice.Int64())
// include the token exchange rate in the fee data if alternative gas token enabled
if r.cfg.GasOracleConfig.AlternativeGasTokenConfig != nil && r.cfg.GasOracleConfig.AlternativeGasTokenConfig.Enabled {
// The exchange rate represent the number of native token on L1 required to exchange for 1 native token on L2.
var exchangeRate float64
switch r.cfg.GasOracleConfig.AlternativeGasTokenConfig.Mode {
case "Fixed":
exchangeRate = r.cfg.GasOracleConfig.AlternativeGasTokenConfig.FixedExchangeRate
case "BinanceApi":
exchangeRate, err = rutils.GetExchangeRateFromBinanceApi(r.cfg.GasOracleConfig.AlternativeGasTokenConfig.TokenSymbolPair, 5)
if err != nil {
log.Error("Failed to get gas token exchange rate from Binance api", "tokenSymbolPair", r.cfg.GasOracleConfig.AlternativeGasTokenConfig.TokenSymbolPair, "err", err)
return
}
default:
log.Error("Invalid alternative gas token mode", "mode", r.cfg.GasOracleConfig.AlternativeGasTokenConfig.Mode)
return
}
if exchangeRate == 0 {
log.Error("Invalid exchange rate", "exchangeRate", exchangeRate)
return
}
suggestGasPriceUint64 = uint64(math.Ceil(float64(suggestGasPriceUint64) * exchangeRate))
suggestGasPrice = new(big.Int).SetUint64(suggestGasPriceUint64)
}
expectedDelta := r.lastGasPrice * r.gasPriceDiff / gasPriceDiffPrecision
if r.lastGasPrice > 0 && expectedDelta == 0 {
expectedDelta = 1
}
// last is undefined or (suggestGasPriceUint64 >= minGasPrice && exceed diff)
if r.lastGasPrice == 0 || (suggestGasPriceUint64 >= r.minGasPrice &&
(math.Abs(float64(suggestGasPriceUint64)-float64(r.lastGasPrice)) >= float64(expectedDelta))) {
data, err := r.l2GasOracleABI.Pack("setL2BaseFee", suggestGasPrice)
if err != nil {
log.Error("Failed to pack setL2BaseFee", "batch.Hash", batch.Hash, "GasPrice", suggestGasPrice.Uint64(), "err", err)
return
}
hash, err := r.gasOracleSender.SendTransaction(batch.Hash, &r.cfg.GasPriceOracleContractAddress, data, nil, 0)
if err != nil {
log.Error("Failed to send setL2BaseFee tx to layer2 ", "batch.Hash", batch.Hash, "err", err)
return
}
err = r.batchOrm.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, batch.Hash, types.GasOracleImporting, hash.String())
if err != nil {
log.Error("UpdateGasOracleStatusAndOracleTxHash failed", "batch.Hash", batch.Hash, "err", err)
return
}
r.lastGasPrice = suggestGasPriceUint64
r.metrics.rollupL2RelayerLastGasPrice.Set(float64(r.lastGasPrice))
log.Info("Update l2 gas price", "txHash", hash.String(), "GasPrice", suggestGasPrice)
}
}
}
// ProcessPendingBatches processes the pending batches by sending commitBatch transactions to layer 1.
func (r *Layer2Relayer) ProcessPendingBatches() {
// get pending batches from database in ascending order by their index.
@@ -1040,22 +930,6 @@ func (r *Layer2Relayer) handleConfirmation(cfm *sender.Confirmation) {
if err != nil {
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "confirmation", cfm, "err", err)
}
case types.SenderTypeL2GasOracle:
batchHash := cfm.ContextID
var status types.GasOracleStatus
if cfm.IsSuccessful {
status = types.GasOracleImported
r.metrics.rollupL2UpdateGasOracleConfirmedTotal.Inc()
} else {
status = types.GasOracleImportedFailed
r.metrics.rollupL2UpdateGasOracleConfirmedFailedTotal.Inc()
log.Warn("UpdateGasOracleTxType transaction confirmed but failed in layer1", "confirmation", cfm)
}
err := r.batchOrm.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, batchHash, status, cfm.TxHash.String())
if err != nil {
log.Warn("UpdateL2GasOracleStatusAndOracleTxHash failed", "confirmation", cfm, "err", err)
}
default:
log.Warn("Unknown transaction type", "confirmation", cfm)
}
@@ -1063,17 +937,6 @@ func (r *Layer2Relayer) handleConfirmation(cfm *sender.Confirmation) {
log.Info("Transaction confirmed in layer1", "confirmation", cfm)
}
func (r *Layer2Relayer) handleL2GasOracleConfirmLoop(ctx context.Context) {
for {
select {
case <-ctx.Done():
return
case cfm := <-r.gasOracleSender.ConfirmChan():
r.handleConfirmation(cfm)
}
}
}
func (r *Layer2Relayer) handleL2RollupRelayerConfirmLoop(ctx context.Context) {
for {
select {
@@ -1248,10 +1111,6 @@ func (r *Layer2Relayer) constructFinalizeBundlePayloadCodecV7(dbBatch *orm.Batch
// StopSenders stops the senders of the rollup-relayer to prevent querying the removed pending_transaction table in unit tests.
// for unit test
func (r *Layer2Relayer) StopSenders() {
if r.gasOracleSender != nil {
r.gasOracleSender.Stop()
}
if r.commitSender != nil {
r.commitSender.Stop()
}


@@ -12,14 +12,10 @@ type l2RelayerMetrics struct {
rollupL2RelayerProcessPendingBatchTotal prometheus.Counter
rollupL2RelayerProcessPendingBatchSuccessTotal prometheus.Counter
rollupL2RelayerProcessPendingBatchErrTooManyPendingBlobTxsTotal prometheus.Counter
rollupL2RelayerGasPriceOraclerRunTotal prometheus.Counter
rollupL2RelayerLastGasPrice prometheus.Gauge
rollupL2BatchesCommittedConfirmedTotal prometheus.Counter
rollupL2BatchesCommittedConfirmedFailedTotal prometheus.Counter
rollupL2BatchesFinalizedConfirmedTotal prometheus.Counter
rollupL2BatchesFinalizedConfirmedFailedTotal prometheus.Counter
rollupL2UpdateGasOracleConfirmedTotal prometheus.Counter
rollupL2UpdateGasOracleConfirmedFailedTotal prometheus.Counter
rollupL2ChainMonitorLatestFailedCall prometheus.Counter
rollupL2ChainMonitorLatestFailedBatchStatus prometheus.Counter
rollupL2RelayerProcessPendingBundlesTotal prometheus.Counter
@@ -56,14 +52,6 @@ func initL2RelayerMetrics(reg prometheus.Registerer) *l2RelayerMetrics {
Name: "rollup_layer2_process_pending_batch_err_too_many_pending_blob_txs_total",
Help: "The total number of layer2 process pending batch failed on too many pending blob txs",
}),
rollupL2RelayerGasPriceOraclerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_layer2_gas_price_oracler_total",
Help: "The total number of layer2 gas price oracler run total",
}),
rollupL2RelayerLastGasPrice: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "rollup_layer2_gas_price_latest_gas_price",
Help: "The latest gas price of rollup relayer l2",
}),
rollupL2BatchesCommittedConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_layer2_process_committed_batches_confirmed_total",
Help: "The total number of layer2 process committed batches confirmed total",
@@ -80,14 +68,6 @@ func initL2RelayerMetrics(reg prometheus.Registerer) *l2RelayerMetrics {
Name: "rollup_layer2_process_finalized_batches_confirmed_failed_total",
Help: "The total number of layer2 process finalized batches confirmed failed total",
}),
rollupL2UpdateGasOracleConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_layer2_update_layer1_gas_oracle_confirmed_total",
Help: "The total number of updating layer2 gas oracle confirmed",
}),
rollupL2UpdateGasOracleConfirmedFailedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_layer2_update_layer1_gas_oracle_confirmed_failed_total",
Help: "The total number of updating layer2 gas oracle confirmed failed",
}),
rollupL2ChainMonitorLatestFailedCall: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "rollup_layer2_chain_monitor_latest_failed_batch_call",
Help: "The total number of failed call chain_monitor api",


@@ -2,7 +2,6 @@ package relayer
import (
"context"
"errors"
"math/big"
"net/http"
"strings"
@@ -14,9 +13,7 @@ import (
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/common"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
"github.com/scroll-tech/go-ethereum/params"
"github.com/smartystreets/goconvey/convey"
"github.com/stretchr/testify/assert"
"gorm.io/gorm"
@@ -44,7 +41,7 @@ func setupL2RelayerDB(t *testing.T) *gorm.DB {
func testCreateNewRelayer(t *testing.T) {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, &params.ChainConfig{}, true, ServiceTypeL2RollupRelayer, nil)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, &params.ChainConfig{}, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
assert.NotNil(t, relayer)
defer relayer.StopSenders()
@@ -64,7 +61,7 @@ func testL2RelayerProcessPendingBatches(t *testing.T) {
assert.Fail(t, "unsupported codec version, expected CodecV4")
}
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, true, ServiceTypeL2RollupRelayer, nil)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
patchGuard := gomonkey.ApplyMethodFunc(l2Cli, "SendTransaction", func(_ context.Context, _ *gethTypes.Transaction) error {
@@ -113,7 +110,7 @@ func testL2RelayerProcessPendingBundles(t *testing.T) {
if codecVersion == encoding.CodecV4 {
chainConfig = &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}
}
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, true, ServiceTypeL2RollupRelayer, nil)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
batch := &encoding.Batch{
@@ -181,7 +178,7 @@ func testL2RelayerFinalizeTimeoutBundles(t *testing.T) {
if codecVersion == encoding.CodecV4 {
chainConfig = &params.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}
}
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, true, ServiceTypeL2RollupRelayer, nil)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
l2BlockOrm := orm.NewL2Block(db)
@@ -257,7 +254,7 @@ func testL2RelayerCommitConfirm(t *testing.T) {
l2Cfg := cfg.L2Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, &params.ChainConfig{}, true, ServiceTypeL2RollupRelayer, nil)
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, &params.ChainConfig{}, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
defer l2Relayer.StopSenders()
@@ -313,7 +310,7 @@ func testL2RelayerFinalizeBundleConfirm(t *testing.T) {
l2Cfg := cfg.L2Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, &params.ChainConfig{}, true, ServiceTypeL2RollupRelayer, nil)
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, &params.ChainConfig{}, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
defer l2Relayer.StopSenders()
@@ -374,149 +371,6 @@ func testL2RelayerFinalizeBundleConfirm(t *testing.T) {
}, 5*time.Second, 100*time.Millisecond, "Bundle or Batch status did not update as expected")
}
func testL2RelayerGasOracleConfirm(t *testing.T) {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
batch1 := &encoding.Batch{
Index: 0,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk1},
}
batchOrm := orm.NewBatch(db)
dbBatch1, err := batchOrm.InsertBatch(context.Background(), batch1, encoding.CodecV0, rutils.BatchMetrics{})
assert.NoError(t, err)
batch2 := &encoding.Batch{
Index: batch1.Index + 1,
TotalL1MessagePoppedBefore: batch1.TotalL1MessagePoppedBefore,
ParentBatchHash: common.HexToHash(dbBatch1.Hash),
Chunks: []*encoding.Chunk{chunk2},
}
dbBatch2, err := batchOrm.InsertBatch(context.Background(), batch2, encoding.CodecV0, rutils.BatchMetrics{})
assert.NoError(t, err)
// Create and set up the Layer2 Relayer.
l2Cfg := cfg.L2Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, &params.ChainConfig{}, false, ServiceTypeL2GasOracle, nil)
assert.NoError(t, err)
defer l2Relayer.StopSenders()
// Simulate message confirmations.
type BatchConfirmation struct {
batchHash string
isSuccessful bool
}
confirmations := []BatchConfirmation{
{batchHash: dbBatch1.Hash, isSuccessful: true},
{batchHash: dbBatch2.Hash, isSuccessful: false},
}
for _, confirmation := range confirmations {
l2Relayer.gasOracleSender.SendConfirmation(&sender.Confirmation{
ContextID: confirmation.batchHash,
IsSuccessful: confirmation.isSuccessful,
SenderType: types.SenderTypeL2GasOracle,
})
}
// Check the database for the updated status using TryTimes.
ok := utils.TryTimes(5, func() bool {
expectedStatuses := []types.GasOracleStatus{types.GasOracleImported, types.GasOracleImportedFailed}
for i, confirmation := range confirmations {
gasOracle, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{"hash": confirmation.batchHash}, nil, 0)
if err != nil || len(gasOracle) != 1 || types.GasOracleStatus(gasOracle[0].OracleStatus) != expectedStatuses[i] {
return false
}
}
return true
})
assert.True(t, ok)
}
func testLayer2RelayerProcessGasPriceOracle(t *testing.T) {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, &params.ChainConfig{}, false, ServiceTypeL2GasOracle, nil)
assert.NoError(t, err)
assert.NotNil(t, relayer)
defer relayer.StopSenders()
var batchOrm *orm.Batch
convey.Convey("Failed to GetLatestBatch", t, func() {
targetErr := errors.New("GetLatestBatch error")
patchGuard := gomonkey.ApplyMethodFunc(batchOrm, "GetLatestBatch", func(context.Context) (*orm.Batch, error) {
return nil, targetErr
})
defer patchGuard.Reset()
relayer.ProcessGasPriceOracle()
})
patchGuard := gomonkey.ApplyMethodFunc(batchOrm, "GetLatestBatch", func(context.Context) (*orm.Batch, error) {
batch := orm.Batch{
OracleStatus: int16(types.GasOraclePending),
Hash: "0x0000000000000000000000000000000000000000",
}
return &batch, nil
})
defer patchGuard.Reset()
convey.Convey("Failed to fetch SuggestGasPrice from l2geth", t, func() {
targetErr := errors.New("SuggestGasPrice error")
patchGuard.ApplyMethodFunc(relayer.l2Client, "SuggestGasPrice", func(ctx context.Context) (*big.Int, error) {
return nil, targetErr
})
relayer.ProcessGasPriceOracle()
})
patchGuard.ApplyMethodFunc(relayer.l2Client, "SuggestGasPrice", func(ctx context.Context) (*big.Int, error) {
return big.NewInt(100), nil
})
convey.Convey("Failed to pack setL2BaseFee", t, func() {
targetErr := errors.New("setL2BaseFee error")
patchGuard.ApplyMethodFunc(relayer.l2GasOracleABI, "Pack", func(name string, args ...interface{}) ([]byte, error) {
return nil, targetErr
})
relayer.ProcessGasPriceOracle()
})
patchGuard.ApplyMethodFunc(relayer.l2GasOracleABI, "Pack", func(name string, args ...interface{}) ([]byte, error) {
return nil, nil
})
convey.Convey("Failed to send setL2BaseFee tx to layer2", t, func() {
targetErr := errors.New("failed to send setL2BaseFee tx to layer2 error")
patchGuard.ApplyMethodFunc(relayer.gasOracleSender, "SendTransaction", func(ContextID string, target *common.Address, data []byte, blob *kzg4844.Blob, fallbackGasLimit uint64) (hash common.Hash, err error) {
return common.Hash{}, targetErr
})
relayer.ProcessGasPriceOracle()
})
patchGuard.ApplyMethodFunc(relayer.gasOracleSender, "SendTransaction", func(ContextID string, target *common.Address, data []byte, blob *kzg4844.Blob, fallbackGasLimit uint64) (hash common.Hash, err error) {
return common.HexToHash("0x56789abcdef1234"), nil
})
convey.Convey("UpdateGasOracleStatusAndOracleTxHash failed", t, func() {
targetErr := errors.New("UpdateL2GasOracleStatusAndOracleTxHash error")
patchGuard.ApplyMethodFunc(batchOrm, "UpdateL2GasOracleStatusAndOracleTxHash", func(ctx context.Context, hash string, status types.GasOracleStatus, txHash string) error {
return targetErr
})
relayer.ProcessGasPriceOracle()
})
patchGuard.ApplyMethodFunc(batchOrm, "UpdateL2GasOracleStatusAndOracleTxHash", func(ctx context.Context, hash string, status types.GasOracleStatus, txHash string) error {
return nil
})
relayer.ProcessGasPriceOracle()
}
func mockChainMonitorServer(baseURL string) (*http.Server, error) {
router := gin.New()
r := router.Group("/v1")
@@ -539,7 +393,7 @@ func testGetBatchStatusByIndex(t *testing.T) {
defer database.CloseDB(db)
cfg.L2Config.RelayerConfig.ChainMonitor.Enabled = true
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, &params.ChainConfig{}, true, ServiceTypeL2RollupRelayer, nil)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, &params.ChainConfig{}, ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
assert.NotNil(t, relayer)
defer relayer.StopSenders()


@@ -128,8 +128,6 @@ func TestFunctions(t *testing.T) {
t.Run("TestL2RelayerFinalizeTimeoutBundles", testL2RelayerFinalizeTimeoutBundles)
t.Run("TestL2RelayerCommitConfirm", testL2RelayerCommitConfirm)
t.Run("TestL2RelayerFinalizeBundleConfirm", testL2RelayerFinalizeBundleConfirm)
t.Run("TestL2RelayerGasOracleConfirm", testL2RelayerGasOracleConfirm)
t.Run("TestLayer2RelayerProcessGasPriceOracle", testLayer2RelayerProcessGasPriceOracle)
// test getBatchStatusByIndex
t.Run("TestGetBatchStatusByIndex", testGetBatchStatusByIndex)


@@ -13,6 +13,8 @@ import (
"github.com/scroll-tech/go-ethereum/params"
"gorm.io/gorm"
"scroll-tech/common/types"
"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/orm"
"scroll-tech/rollup/internal/utils"
@@ -34,6 +36,7 @@ type BatchProposer struct {
maxUncompressedBatchBytesSize uint64
maxChunksPerBatch int
replayMode bool
minCodecVersion encoding.CodecVersion
chainCfg *params.ChainConfig
@@ -80,6 +83,7 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, minC
gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
maxUncompressedBatchBytesSize: cfg.MaxUncompressedBatchBytesSize,
maxChunksPerBatch: cfg.MaxChunksPerBatch,
replayMode: false,
minCodecVersion: minCodecVersion,
chainCfg: chainCfg,
@@ -152,6 +156,14 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, minC
return p
}
// SetReplayDB sets the replay database for the BatchProposer.
// This is used for the proposer tool only, to change the l2_block data source.
// This function is not thread-safe and should be called after initializing the BatchProposer and before starting to propose batches.
func (p *BatchProposer) SetReplayDB(replayDB *gorm.DB) {
p.l2BlockOrm = orm.NewL2Block(replayDB)
p.replayMode = true
}
// TryProposeBatch tries to propose a new batch.
func (p *BatchProposer) TryProposeBatch() {
p.batchProposerCircleTotal.Inc()
@@ -226,6 +238,15 @@ func (p *BatchProposer) updateDBBatchInfo(batch *encoding.Batch, codecVersion en
log.Warn("BatchProposer.UpdateBatchHashInRange update the chunk's batch hash failure", "hash", dbBatch.Hash, "error", dbErr)
return dbErr
}
if p.replayMode {
// If replayMode is true, meaning the batch was proposed by the proposer tool,
// set batch status to types.RollupCommitted and assign a unique commit tx hash to enable new bundle proposals.
if dbErr = p.batchOrm.UpdateCommitTxHashAndRollupStatus(p.ctx, dbBatch.Hash, dbBatch.Hash, types.RollupCommitted, dbTX); dbErr != nil {
log.Warn("BatchProposer.UpdateCommitTxHashAndRollupStatus update the batch's commit tx hash failure", "hash", dbBatch.Hash, "error", dbErr)
return dbErr
}
}
return nil
})
if err != nil {


@@ -199,7 +199,7 @@ func (p *BundleProposer) proposeBundle() error {
currentTimeSec := uint64(time.Now().Unix())
if firstChunk.StartBlockTime+p.bundleTimeoutSec < currentTimeSec {
log.Info("first block timeout", "batch count", len(batches), "start block number", firstChunk.StartBlockNumber, "start block timestamp", firstChunk.StartBlockTime, "current time", currentTimeSec)
log.Info("first block timeout", "batch count", len(batches), "start block number", firstChunk.StartBlockNumber, "start block timestamp", firstChunk.StartBlockTime, "bundle timeout", p.bundleTimeoutSec, "current time", currentTimeSec)
batches, err = p.allBatchesCommittedInSameTXIncluded(batches)
if err != nil {


@@ -36,6 +36,7 @@ type ChunkProposer struct {
gasCostIncreaseMultiplier float64
maxUncompressedBatchBytesSize uint64
replayMode bool
minCodecVersion encoding.CodecVersion
chainCfg *params.ChainConfig
@@ -91,6 +92,7 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, minC
chunkTimeoutSec: cfg.ChunkTimeoutSec,
gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
maxUncompressedBatchBytesSize: cfg.MaxUncompressedBatchBytesSize,
replayMode: false,
minCodecVersion: minCodecVersion,
chainCfg: chainCfg,
@@ -175,6 +177,14 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, minC
return p
}
// SetReplayDB sets the replay database for the ChunkProposer.
// This is used for the proposer tool only, to change the l2_block data source.
// This function is not thread-safe and should be called after initializing the ChunkProposer and before starting to propose chunks.
func (p *ChunkProposer) SetReplayDB(replayDB *gorm.DB) {
p.l2BlockOrm = orm.NewL2Block(replayDB)
p.replayMode = true
}
// TryProposeChunk tries to propose a new chunk.
func (p *ChunkProposer) TryProposeChunk() {
p.chunkProposerCircleTotal.Inc()
@@ -241,9 +251,12 @@ func (p *ChunkProposer) updateDBChunkInfo(chunk *encoding.Chunk, codecVersion en
log.Warn("ChunkProposer.InsertChunk failed", "codec version", codecVersion, "err", err)
return err
}
if err := p.l2BlockOrm.UpdateChunkHashInRange(p.ctx, dbChunk.StartBlockNumber, dbChunk.EndBlockNumber, dbChunk.Hash, dbTX); err != nil {
log.Error("failed to update chunk_hash for l2_blocks", "chunk hash", dbChunk.Hash, "start block", dbChunk.StartBlockNumber, "end block", dbChunk.EndBlockNumber, "err", err)
return err
// In replayMode we don't need to update chunk_hash in the l2_block table.
if !p.replayMode {
if err := p.l2BlockOrm.UpdateChunkHashInRange(p.ctx, dbChunk.StartBlockNumber, dbChunk.EndBlockNumber, dbChunk.Hash, dbTX); err != nil {
log.Error("failed to update chunk_hash for l2_block", "chunk hash", dbChunk.Hash, "start block", dbChunk.StartBlockNumber, "end block", dbChunk.EndBlockNumber, "err", err)
return err
}
}
return nil
})
@@ -436,6 +449,12 @@ func (p *ChunkProposer) recordTimerChunkMetrics(metrics *utils.ChunkMetrics) {
}
func (p *ChunkProposer) tryProposeEuclidTransitionChunk(blocks []*encoding.Block) (bool, error) {
// In replay mode there is a corner case: when StartL2Block is set to 0, this check needs to fetch the genesis block,
// but the mainnet DB has no genesis block, so we bypass this check.
if p.replayMode {
return false, nil
}
if !p.chainCfg.IsEuclid(blocks[0].Header.Time) {
return false, nil
}


@@ -0,0 +1,160 @@
package watcher
import (
"context"
"fmt"
"math/big"
"time"
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/common"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/params"
"gorm.io/gorm"
"scroll-tech/common/database"
"scroll-tech/common/utils"
"scroll-tech/database/migrate"
"scroll-tech/rollup/internal/config"
"scroll-tech/rollup/internal/orm"
rutils "scroll-tech/rollup/internal/utils"
)
// ProposerTool is a tool that replays chunk, batch, and bundle proposing for analysis purposes.
type ProposerTool struct {
ctx context.Context
cancel context.CancelFunc
db *gorm.DB
dbForReplay *gorm.DB
client *ethclient.Client
chunkProposer *ChunkProposer
batchProposer *BatchProposer
bundleProposer *BundleProposer
}
// NewProposerTool creates a new ProposerTool instance.
func NewProposerTool(ctx context.Context, cancel context.CancelFunc, cfg *config.ConfigForReplay, startL2BlockHeight uint64, minCodecVersion encoding.CodecVersion, chainCfg *params.ChainConfig) (*ProposerTool, error) {
// Init db connection
db, err := database.InitDB(cfg.DBConfig)
if err != nil {
return nil, fmt.Errorf("failed to init db connection: %w", err)
}
sqlDB, err := db.DB()
if err != nil {
return nil, fmt.Errorf("failed to get db connection: %w", err)
}
if err = migrate.ResetDB(sqlDB); err != nil {
return nil, fmt.Errorf("failed to reset db: %w", err)
}
log.Info("successfully reset db")
// Init dbForReplay connection
dbForReplay, err := database.InitDB(cfg.DBConfigForReplay)
if err != nil {
return nil, fmt.Errorf("failed to init dbForReplay connection: %w", err)
}
client, err := ethclient.Dial(cfg.L2Config.Endpoint)
if err != nil {
return nil, fmt.Errorf("failed to connect to L2 geth, endpoint: %s, err: %w", cfg.L2Config.Endpoint, err)
}
prevChunk, err := orm.NewChunk(dbForReplay).GetParentChunkByBlockNumber(ctx, startL2BlockHeight)
if err != nil {
return nil, fmt.Errorf("failed to get previous chunk: %w", err)
}
var startQueueIndex uint64
if prevChunk != nil {
startQueueIndex = prevChunk.TotalL1MessagesPoppedBefore + prevChunk.TotalL1MessagesPoppedInChunk
}
startBlock := uint64(0)
if prevChunk != nil {
startBlock = prevChunk.EndBlockNumber + 1
}
var chunk *encoding.Chunk
for blockNum := startBlock; blockNum <= startL2BlockHeight; blockNum++ {
block, err := client.BlockByNumber(ctx, new(big.Int).SetUint64(blockNum))
if err != nil {
return nil, fmt.Errorf("failed to get block %d: %w", blockNum, err)
}
for _, tx := range block.Transactions() {
if tx.Type() == gethTypes.L1MessageTxType {
startQueueIndex++
}
}
if blockNum == startL2BlockHeight {
chunk = &encoding.Chunk{Blocks: []*encoding.Block{{Header: block.Header()}}}
}
}
// Set the empty hash as the post_l1_message_queue_hash of the first chunk,
// i.e., treat the first L1 message after this chunk as the first L1 message in message queue v2.
// Though this setting differs from mainnet, it is simple yet sufficient for data analysis usage.
_, err = orm.NewChunk(db).InsertTestChunkForProposerTool(ctx, chunk, minCodecVersion, startQueueIndex)
if err != nil {
return nil, fmt.Errorf("failed to insert chunk, minCodecVersion: %d, startQueueIndex: %d, err: %w", minCodecVersion, startQueueIndex, err)
}
batch := &encoding.Batch{
Index: 0,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk},
}
var dbBatch *orm.Batch
dbBatch, err = orm.NewBatch(db).InsertBatch(ctx, batch, encoding.CodecV0, rutils.BatchMetrics{})
if err != nil {
return nil, fmt.Errorf("failed to insert batch: %w", err)
}
if err = orm.NewChunk(db).UpdateBatchHashInRange(ctx, 0, 0, dbBatch.Hash); err != nil {
return nil, fmt.Errorf("failed to update batch hash for chunks: %w", err)
}
chunkProposer := NewChunkProposer(ctx, cfg.L2Config.ChunkProposerConfig, minCodecVersion, chainCfg, db, nil)
chunkProposer.SetReplayDB(dbForReplay)
batchProposer := NewBatchProposer(ctx, cfg.L2Config.BatchProposerConfig, minCodecVersion, chainCfg, db, nil)
batchProposer.SetReplayDB(dbForReplay)
bundleProposer := NewBundleProposer(ctx, cfg.L2Config.BundleProposerConfig, minCodecVersion, chainCfg, db, nil)
return &ProposerTool{
ctx: ctx,
cancel: cancel,
db: db,
dbForReplay: dbForReplay,
client: client,
chunkProposer: chunkProposer,
batchProposer: batchProposer,
bundleProposer: bundleProposer,
}, nil
}
func (p *ProposerTool) Start() {
go utils.Loop(p.ctx, 100*time.Millisecond, p.chunkProposer.TryProposeChunk)
go utils.Loop(p.ctx, 100*time.Millisecond, p.batchProposer.TryProposeBatch)
go utils.Loop(p.ctx, 100*time.Millisecond, p.bundleProposer.TryProposeBundle)
}
func (p *ProposerTool) Stop() {
p.cancel()
if err := database.CloseDB(p.db); err != nil {
log.Error("failed to close db connection", "error", err)
}
if err := database.CloseDB(p.dbForReplay); err != nil {
log.Error("failed to close dbForReplay connection", "error", err)
}
p.client.Close()
}
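
For context: the command-line wiring for this tool is not part of the hunks shown here. Below is a minimal sketch of how a caller might drive NewProposerTool, Start, and Stop. The loadReplayConfig helper, the config path, the start height, and the watcher import path are illustrative assumptions, not code from this change.

package main

import (
	"context"
	"encoding/json"
	"os"
	"os/signal"

	"github.com/scroll-tech/da-codec/encoding"
	"github.com/scroll-tech/go-ethereum/log"
	"github.com/scroll-tech/go-ethereum/params"

	"scroll-tech/rollup/internal/config"
	"scroll-tech/rollup/internal/controller/watcher" // import path assumed; the new file declares package watcher
)

// loadReplayConfig is a hypothetical helper that reads the JSON config shown
// further below (l2_config, db_config, db_config_for_replay).
func loadReplayConfig(path string) (*config.ConfigForReplay, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	cfg := &config.ConfigForReplay{}
	if err := json.Unmarshal(data, cfg); err != nil {
		return nil, err
	}
	return cfg, nil
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())

	cfg, err := loadReplayConfig("./conf/config.json") // path is illustrative
	if err != nil {
		log.Crit("failed to load config", "err", err)
	}

	// Chain config and start height would normally come from flags or the
	// genesis file added in this change; hard-coded here for illustration only.
	chainCfg := &params.ChainConfig{}
	startL2BlockHeight := uint64(10_000_000)

	tool, err := watcher.NewProposerTool(ctx, cancel, cfg, startL2BlockHeight, encoding.CodecV4, chainCfg)
	if err != nil {
		log.Crit("failed to create proposer tool", "err", err)
	}

	tool.Start()

	// Wait for an interrupt before shutting down.
	sig := make(chan os.Signal, 1)
	signal.Notify(sig, os.Interrupt)
	<-sig
	tool.Stop()
}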


@@ -56,10 +56,6 @@ type Batch struct {
FinalizeTxHash string `json:"finalize_tx_hash" gorm:"column:finalize_tx_hash;default:NULL"`
FinalizedAt *time.Time `json:"finalized_at" gorm:"column:finalized_at;default:NULL"`
// gas oracle
OracleStatus int16 `json:"oracle_status" gorm:"column:oracle_status;default:1"`
OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`
// blob
BlobDataProof []byte `json:"blob_data_proof" gorm:"column:blob_data_proof"`
BlobSize uint64 `json:"blob_size" gorm:"column:blob_size"`
@@ -166,7 +162,8 @@ func (o *Batch) GetFirstUnbatchedChunkIndex(ctx context.Context) (uint64, error)
return latestBatch.EndChunkIndex + 1, nil
}
// GetCommittedBatchesGEIndexGECodecVersion retrieves batches that have been committed (commit_tx_hash is set) and have a batch index greater than or equal to the given index and codec version.
// GetCommittedBatchesGEIndexGECodecVersion retrieves batches that have been committed (commit_tx_hash is set) and not finalized (finalize_tx_hash is NULL).
// It returns batches that have an index greater than or equal to the given index and codec version.
// The returned batches are sorted in ascending order by their index.
func (o *Batch) GetCommittedBatchesGEIndexGECodecVersion(ctx context.Context, index uint64, codecv encoding.CodecVersion, limit int) ([]*Batch, error) {
db := o.db.WithContext(ctx)
@@ -174,6 +171,7 @@ func (o *Batch) GetCommittedBatchesGEIndexGECodecVersion(ctx context.Context, in
db = db.Where("index >= ?", index)
db = db.Where("codec_version >= ?", codecv)
db = db.Where("commit_tx_hash IS NOT NULL") // only include committed batches
db = db.Where("finalize_tx_hash IS NULL") // exclude finalized batches
db = db.Order("index ASC")
if limit > 0 {
@@ -310,7 +308,6 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecVer
ChunkProofsStatus: int16(types.ChunkProofsStatusPending),
ProvingStatus: int16(types.ProvingTaskUnassigned),
RollupStatus: int16(types.RollupPending),
OracleStatus: int16(types.GasOraclePending),
TotalL1CommitGas: metrics.L1CommitGas,
TotalL1CommitCalldataSize: metrics.L1CommitCalldataSize,
BlobDataProof: batchMeta.BatchBlobDataProof,
@@ -331,22 +328,6 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecVer
return &newBatch, nil
}
// UpdateL2GasOracleStatusAndOracleTxHash updates the L2 gas oracle status and transaction hash for a batch.
func (o *Batch) UpdateL2GasOracleStatusAndOracleTxHash(ctx context.Context, hash string, status types.GasOracleStatus, txHash string) error {
updateFields := make(map[string]interface{})
updateFields["oracle_status"] = int(status)
updateFields["oracle_tx_hash"] = txHash
db := o.db.WithContext(ctx)
db = db.Model(&Batch{})
db = db.Where("hash", hash)
if err := db.Updates(updateFields).Error; err != nil {
return fmt.Errorf("Batch.UpdateL2GasOracleStatusAndOracleTxHash error: %w, batch hash: %v, status: %v, txHash: %v", err, hash, status.String(), txHash)
}
return nil
}
// UpdateProvingStatus updates the proving status of a batch.
func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
updateFields := make(map[string]interface{})
@@ -405,7 +386,12 @@ func (o *Batch) UpdateRollupStatus(ctx context.Context, hash string, status type
func (o *Batch) UpdateCommitTxHashAndRollupStatus(ctx context.Context, hash string, commitTxHash string, status types.RollupStatus, dbTX ...*gorm.DB) error {
updateFields := make(map[string]interface{})
updateFields["commit_tx_hash"] = commitTxHash
updateFields["rollup_status"] = int(status)
updateFields["rollup_status"] = gorm.Expr(
`CASE
WHEN rollup_status NOT IN (?, ?) THEN ?
ELSE rollup_status
END`,
types.RollupFinalizing, types.RollupFinalized, int(status))
if status == types.RollupCommitted {
updateFields["committed_at"] = utils.NowUTC()
}
@@ -416,15 +402,6 @@ func (o *Batch) UpdateCommitTxHashAndRollupStatus(ctx context.Context, hash stri
}
db = db.WithContext(ctx)
var currentBatch Batch
if err := db.Where("hash", hash).First(&currentBatch).Error; err != nil {
return fmt.Errorf("Batch.UpdateCommitTxHashAndRollupStatus error when querying current status: %w, batch hash: %v", err, hash)
}
if types.RollupStatus(currentBatch.RollupStatus) == types.RollupFinalizing || types.RollupStatus(currentBatch.RollupStatus) == types.RollupFinalized {
return nil
}
db = db.Model(&Batch{})
db = db.Where("hash", hash)
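
The gorm.Expr CASE above folds the old query-then-return guard into the UPDATE statement itself, so a late commit confirmation can no longer overwrite a batch that has already moved to finalizing or finalized. Below is a rough, hand-written equivalent of that guarded update; the batch table and column names are assumed from the model above, committed_at stamping is omitted for brevity, and this snippet is illustrative only, not part of the change.

package orm

import (
	"gorm.io/gorm"

	"scroll-tech/common/types"
)

// updateCommitTxHashGuarded is an illustrative, hand-written equivalent of the
// guarded update produced by the gorm.Expr CASE expression above.
func updateCommitTxHashGuarded(db *gorm.DB, hash, commitTxHash string, status types.RollupStatus) error {
	return db.Exec(`
		UPDATE batch
		SET commit_tx_hash = ?,
		    rollup_status  = CASE
		                       WHEN rollup_status NOT IN (?, ?) THEN ?
		                       ELSE rollup_status
		                     END
		WHERE hash = ?`,
		commitTxHash, int(types.RollupFinalizing), int(types.RollupFinalized), int(status), hash,
	).Error
}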


@@ -179,6 +179,25 @@ func (o *Chunk) GetChunksByBatchHash(ctx context.Context, batchHash string) ([]*
return chunks, nil
}
// GetParentChunkByBlockNumber retrieves the parent chunk by block number
// It is only used by the proposer tool for analysis purposes.
func (o *Chunk) GetParentChunkByBlockNumber(ctx context.Context, blockNumber uint64) (*Chunk, error) {
db := o.db.WithContext(ctx)
db = db.Model(&Chunk{})
db = db.Where("end_block_number < ?", blockNumber)
db = db.Order("end_block_number DESC")
db = db.Limit(1)
var chunk Chunk
if err := db.First(&chunk).Error; err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
return nil, nil
}
return nil, fmt.Errorf("Chunk.GetParentChunkByBlockNumber error: %w", err)
}
return &chunk, nil
}
// InsertChunk inserts a new chunk into the database.
func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, codecVersion encoding.CodecVersion, metrics rutils.ChunkMetrics, dbTX ...*gorm.DB) (*Chunk, error) {
if chunk == nil || len(chunk.Blocks) == 0 {
@@ -259,6 +278,51 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, codecVer
return &newChunk, nil
}
// InsertTestChunkForProposerTool inserts a new chunk into the database, only for analysis usage by the proposer tool.
func (o *Chunk) InsertTestChunkForProposerTool(ctx context.Context, chunk *encoding.Chunk, codecVersion encoding.CodecVersion, totalL1MessagePoppedBefore uint64, dbTX ...*gorm.DB) (*Chunk, error) {
if chunk == nil || len(chunk.Blocks) == 0 {
return nil, errors.New("invalid args")
}
chunkHash, err := rutils.GetChunkHash(chunk, totalL1MessagePoppedBefore, codecVersion)
if err != nil {
log.Error("failed to get chunk hash", "err", err)
return nil, fmt.Errorf("Chunk.InsertTestChunkForProposerTool error: %w", err)
}
numBlocks := len(chunk.Blocks)
firstBlock := chunk.Blocks[0]
lastBlock := chunk.Blocks[numBlocks-1]
newChunk := Chunk{
Index: 0,
Hash: chunkHash.Hex(),
StartBlockNumber: firstBlock.Header.Number.Uint64(),
StartBlockHash: firstBlock.Header.Hash().Hex(),
EndBlockNumber: lastBlock.Header.Number.Uint64(),
EndBlockHash: lastBlock.Header.Hash().Hex(),
TotalL2TxGas: chunk.TotalGasUsed(),
TotalL2TxNum: chunk.NumL2Transactions(),
StartBlockTime: firstBlock.Header.Time,
TotalL1MessagesPoppedBefore: totalL1MessagePoppedBefore,
StateRoot: lastBlock.Header.Root.Hex(),
WithdrawRoot: lastBlock.WithdrawRoot.Hex(),
CodecVersion: int16(codecVersion),
}
db := o.db
if len(dbTX) > 0 && dbTX[0] != nil {
db = dbTX[0]
}
db = db.WithContext(ctx)
db = db.Model(&Chunk{})
if err := db.Create(&newChunk).Error; err != nil {
return nil, fmt.Errorf("Chunk.InsertTestChunkForProposerTool error: %w, chunk hash: %v", err, newChunk.Hash)
}
return &newChunk, nil
}
// UpdateProvingStatus updates the proving status of a chunk.
func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
updateFields := make(map[string]interface{})


@@ -300,16 +300,12 @@ func TestBatchOrm(t *testing.T) {
assert.NoError(t, err)
err = batchOrm.UpdateRollupStatus(context.Background(), batchHash2, types.RollupFinalized)
assert.NoError(t, err)
err = batchOrm.UpdateL2GasOracleStatusAndOracleTxHash(context.Background(), batchHash2, types.GasOracleImported, "oracleTxHash")
assert.NoError(t, err)
updatedBatch, err := batchOrm.GetLatestBatch(context.Background())
assert.NoError(t, err)
assert.NotNil(t, updatedBatch)
assert.Equal(t, types.ProvingTaskVerified, types.ProvingStatus(updatedBatch.ProvingStatus))
assert.Equal(t, types.RollupFinalized, types.RollupStatus(updatedBatch.RollupStatus))
assert.Equal(t, types.GasOracleImported, types.GasOracleStatus(updatedBatch.OracleStatus))
assert.Equal(t, "oracleTxHash", updatedBatch.OracleTxHash)
err = batchOrm.UpdateCommitTxHashAndRollupStatus(context.Background(), batchHash1, "commitTxHash", types.RollupCommitted)
assert.NoError(t, err)
@@ -318,7 +314,7 @@ func TestBatchOrm(t *testing.T) {
updatedBatch, err = batchOrm.GetLatestBatch(context.Background())
assert.NoError(t, err)
assert.NotNil(t, updatedBatch)
assert.Equal(t, "", updatedBatch.CommitTxHash)
assert.Equal(t, "commitTxHash", updatedBatch.CommitTxHash)
assert.Equal(t, types.RollupFinalized, types.RollupStatus(updatedBatch.RollupStatus))
err = batchOrm.UpdateFinalizeTxHashAndRollupStatus(context.Background(), batchHash2, "finalizeTxHash", types.RollupFinalizeFailed)
@@ -359,10 +355,7 @@ func TestBatchOrm(t *testing.T) {
batches, err = batchOrm.GetCommittedBatchesGEIndexGECodecVersion(context.Background(), 0, codecVersion, 0)
assert.NoError(t, err)
assert.Equal(t, 1, len(batches))
assert.Equal(t, batchHash1, batches[0].Hash)
assert.Equal(t, types.ProvingTaskFailed, types.ProvingStatus(batches[0].ProvingStatus))
assert.Equal(t, types.RollupCommitFailed, types.RollupStatus(batches[0].RollupStatus))
assert.Equal(t, 0, len(batches))
}
}


@@ -0,0 +1,40 @@
{
"l2_config": {
"endpoint": "https://rpc.scroll.io",
"chunk_proposer_config": {
"max_block_num_per_chunk": 100,
"max_tx_num_per_chunk": 100,
"max_l2_gas_per_chunk": 20000000,
"max_l1_commit_gas_per_chunk": 5000000,
"max_l1_commit_calldata_size_per_chunk": 123740,
"chunk_timeout_sec": 72000000000,
"max_row_consumption_per_chunk": 10000000000,
"gas_cost_increase_multiplier": 1.2,
"max_uncompressed_batch_bytes_size": 634693
},
"batch_proposer_config": {
"max_l1_commit_gas_per_batch": 5000000,
"max_l1_commit_calldata_size_per_batch": 123740,
"batch_timeout_sec": 72000000000,
"gas_cost_increase_multiplier": 1.2,
"max_uncompressed_batch_bytes_size": 634693,
"max_chunks_per_batch": 45
},
"bundle_proposer_config": {
"max_batch_num_per_bundle": 45,
"bundle_timeout_sec": 36000000000
}
},
"db_config": {
"driver_name": "postgres",
"dsn": "postgres://postgres:postgres@db:5432/scroll?sslmode=disable",
"maxOpenNum": 200,
"maxIdleNum": 20
},
"db_config_for_replay": {
"driver_name": "postgres",
"dsn": "<mainnet read db config>",
"maxOpenNum": 200,
"maxIdleNum": 20
}
}


@@ -0,0 +1,19 @@
{
"config": {
"chainId": 534352,
"bernoulliBlock": 0,
"curieBlock": 0,
"darwinTime": 0,
"darwinV2Time": 0,
"euclidTime": 0,
"euclidV2Time": 0
},
"nonce": "0x0000000000000033",
"timestamp": "0x0",
"parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"gasLimit": "0x8000000",
"difficulty": "0x100",
"mixhash": "0x0000000000000000000000000000000000000000000000000000000000000000",
"coinbase": "0x3333333333333333333333333333333333333333",
"alloc": {}
}


@@ -0,0 +1,30 @@
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base
WORKDIR /src
COPY go.work* ./
COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x
# Build proposer_tool
FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/rollup/cmd/proposer_tool/ && CGO_LDFLAGS="-ldl" go build -v -p 4 -o /bin/proposer_tool
# Pull proposer_tool into a second stage deploy ubuntu container
FROM ubuntu:20.04
RUN apt update && apt install vim netcat-openbsd net-tools curl ca-certificates -y
ENV CGO_LDFLAGS="-ldl"
COPY --from=builder /bin/proposer_tool /bin/
WORKDIR /app
ENTRYPOINT ["proposer_tool"]


@@ -211,8 +211,7 @@ func TestFunction(t *testing.T) {
t.Run("testCommitBatchAndFinalizeBundleCodecV4V5V6", testCommitBatchAndFinalizeBundleCodecV4V5V6)
t.Run("TestCommitBatchAndFinalizeBundleCodecV7", testCommitBatchAndFinalizeBundleCodecV7)
// l1/l2 gas oracle
// l1 gas oracle
t.Run("TestImportL1GasPrice", testImportL1GasPrice)
t.Run("TestImportDefaultL1GasPriceDueToL1GasPriceSpike", testImportDefaultL1GasPriceDueToL1GasPriceSpike)
t.Run("TestImportL2GasPrice", testImportL2GasPrice)
}


@@ -9,7 +9,6 @@ import (
"github.com/scroll-tech/da-codec/encoding"
"github.com/scroll-tech/go-ethereum/common"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/params"
"github.com/stretchr/testify/assert"
"scroll-tech/common/database"
@@ -201,56 +200,3 @@ func testImportDefaultL1GasPriceDueToL1GasPriceSpike(t *testing.T) {
assert.Empty(t, blocks[0].OracleTxHash)
assert.Equal(t, types.GasOracleStatus(blocks[0].GasOracleStatus), types.GasOraclePending)
}
func testImportL2GasPrice(t *testing.T) {
db := setupDB(t)
defer database.CloseDB(db)
prepareContracts(t)
l2Cfg := rollupApp.Config.L2Config
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, &params.ChainConfig{}, false, relayer.ServiceTypeL2GasOracle, nil)
assert.NoError(t, err)
defer l2Relayer.StopSenders()
// add fake chunk
chunk := &encoding.Chunk{
Blocks: []*encoding.Block{
{
Header: &gethTypes.Header{
Number: big.NewInt(1),
ParentHash: common.Hash{},
Difficulty: big.NewInt(0),
BaseFee: big.NewInt(0),
},
Transactions: nil,
WithdrawRoot: common.Hash{},
RowConsumption: &gethTypes.RowConsumption{},
},
},
}
batch := &encoding.Batch{
Index: 0,
TotalL1MessagePoppedBefore: 0,
ParentBatchHash: common.Hash{},
Chunks: []*encoding.Chunk{chunk},
}
batchOrm := orm.NewBatch(db)
_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{})
assert.NoError(t, err)
// check db status
dbBatch, err := batchOrm.GetLatestBatch(context.Background())
assert.NoError(t, err)
assert.NotNil(t, batch)
assert.Empty(t, dbBatch.OracleTxHash)
assert.Equal(t, types.GasOracleStatus(dbBatch.OracleStatus), types.GasOraclePending)
// relay gas price
l2Relayer.ProcessGasPriceOracle()
dbBatch, err = batchOrm.GetLatestBatch(context.Background())
assert.NoError(t, err)
assert.NotNil(t, batch)
assert.NotEmpty(t, dbBatch.OracleTxHash)
assert.Equal(t, types.GasOracleStatus(dbBatch.OracleStatus), types.GasOracleImporting)
}


@@ -33,7 +33,7 @@ func testCommitAndFinalizeGenesisBatch(t *testing.T) {
prepareContracts(t)
l2Cfg := rollupApp.Config.L2Config
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, &params.ChainConfig{}, true, relayer.ServiceTypeL2RollupRelayer, nil)
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, &params.ChainConfig{}, relayer.ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
assert.NotNil(t, l2Relayer)
defer l2Relayer.StopSenders()
@@ -65,7 +65,7 @@ func testCommitBatchAndFinalizeBundleCodecV4V5V6(t *testing.T) {
// Create L2Relayer
l2Cfg := rollupApp.Config.L2Config
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, chainConfig, true, relayer.ServiceTypeL2RollupRelayer, nil)
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, chainConfig, relayer.ServiceTypeL2RollupRelayer, nil)
assert.NoError(t, err)
// add some blocks to db
@@ -236,7 +236,7 @@ func testCommitBatchAndFinalizeBundleCodecV7(t *testing.T) {
// Create L2Relayer
l2Cfg := rollupApp.Config.L2Config
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, chainConfig, true, relayer.ServiceTypeL2RollupRelayer, nil)
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, chainConfig, relayer.ServiceTypeL2RollupRelayer, nil)
require.NoError(t, err)
defer l2Relayer.StopSenders()

zkvm-prover/Cargo.lock (generated): 632 lines changed; diff suppressed because it is too large.


@@ -18,7 +18,7 @@ serde = { version = "1.0.198", features = ["derive"] }
serde_json = "1.0.116"
futures = "0.3.30"
scroll-zkvm-prover-euclid = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.2.0", package = "scroll-zkvm-prover" }
scroll-zkvm-prover-euclid = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.3.0", package = "scroll-zkvm-prover" }
ethers-core = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
ethers-providers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
scroll-proving-sdk = { git = "https://github.com/scroll-tech/scroll-proving-sdk.git", branch = "main", features = [
@@ -51,28 +51,28 @@ openvm-stark-backend = { git = "ssh://git@github.com/scroll-tech/openvm-stark-gp
openvm-stark-sdk = { git = "ssh://git@github.com/scroll-tech/openvm-stark-gpu.git", branch = "main", features = ["gpu"] }
[patch."https://github.com/Plonky3/Plonky3.git"]
p3-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-field = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-commit = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-matrix = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-field = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-commit = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-matrix = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-baby-bear = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", features = [
"nightly-features",
], tag = "v0.1.0" }
p3-util = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-challenger = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-dft = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-fri = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-goldilocks = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-keccak = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-keccak-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-blake3 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-mds = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-merkle-tree = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-monty-31 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-poseidon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-poseidon2 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-poseidon2-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-symmetric = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-uni-stark = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-maybe-rayon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" } # the "parallel" feature is NOT on by default to allow single-threaded benchmarking
p3-bn254-fr = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
], tag = "v0.1.1" }
p3-util = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-challenger = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-dft = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-fri = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-goldilocks = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-keccak = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-keccak-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-blake3 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-mds = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-merkle-tree = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-monty-31 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-poseidon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-poseidon2 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-poseidon2-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-symmetric = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-uni-stark = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-maybe-rayon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" } # the "parallel" feature is NOT on by default to allow single-threaded benchmarking
p3-bn254-fr = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }