Mirror of https://github.com/scroll-tech/scroll.git (synced 2026-01-12 23:48:15 -05:00)

Compare commits: 190 commits, v4.5.9 ... tools/get-
Commits in this comparison (the Author and Date columns were empty in the mirror; only abbreviated SHA1s were preserved):

634cb73ff2, dced0c6a82, f045984c99, 38af170acc, c65622b7f6, 80af42695d, 4c4cff0ca4, 0df9ede841, d1138653e0, 1572680566,
8f4fc9af95, 917b143557, c4849251c6, 9bb768e454, 9033471930, 886af073c1, 2b318ec7c7, 4c2a75576f, 2a8330c346, d82e109360,
42bfcb84d7, 83c0a0870c, 78a458daa5, 01d0e48e9a, 5cfb8b6a69, b59db732c3, 899476731d, 1bec964097, b73acca200, 77dceaea35,
d0cb8b9aa5, ed057286d9, b3e46673f6, 2fb27ceb3d, e3332885ed, 3ee2d2b39c, 4b21c79443, c6f0299373, 3454c6c670, 901693a2c0,
0bb53140f5, 09790c4448, ae212a919a, 9b5c42e9d9, 60877d3c16, 07d1621310, 11afeb1354, cf41048c0a, 77d63226c5, 135073c0ad,
bab0e4f8d6, 2d620ddf4f, 8befb84910, 4822d38aba, cb87c7aedd, 3a3db5fe32, b4546af434, 459941d942, 9f480e5397, 7d4ff80edf,
5869bfd825, 12a262ad99, 7d5b77a36c, 5f8bb53dce, 87e1235c7f, 86e6555a54, e3b17a0740, ef9e25f14c, 0fc28cb511, ad2e94e190,
2846ecffa5, 0e82c63ac4, 9996af6227, 8cf087c63b, b984341991, 7486236a7a, a6ed321666, 8db4e5c77d, 5cf8cda8a7, bcc6b0f7e0,
fe6451b76c, be88ef6c39, 64368f9a79, f288179451, b8c7ec2b22, 88da49383c, 1ea9acafa3, c743efd99e, 2d40f0f942, fcbaa674c6,
110083c6c8, b3c1df7557, 893bf18d62, 7ec6d478b3, eacdc78ba7, 2cc9f65852, af381223f3, bb6ee2c932, e99a8515b9, 38b3239c6b,
d987931e30, 90d15637eb, 4d677b344b, d57e6b0e7b, 9b462e4c98, c9f6e8c6e1, 867307d576, 20dffe4ea5, 57d50b7183, 7a70e374b8,
0799dd48f2, 224546e380, 95adcc378f, 47219f2d86, ab7038c0a7, d79aaef35a, da963313b6, f27ddb7f8e, 94bee1903a, b7e7d1a1f1,
f1ea4b315c, 8b08a57f63, a868bc1531, 101cc46bd9, 9f4c9ee150, 03c63a62cf, b30f4d0b00, 4333d51bef, 82dd5e0e5e, f91c999005,
c8b614fd2f, a1c4562432, d6674e8a3d, 55b32e1c0c, 8ea431514d, 26a49cb2a3, e27ab5a396, 554a233928, 673777fe63, 7353f30ff6,
eb5758b693, 47a6c23b1f, 081d28988d, 782e019f9c, 89ede0d315, a55de1fc09, ed394a6369, 121ce09c80, 0125dd62a6, bb9d404e85,
e1a0bab452, 50ebf179fd, 01fa3b34a7, 2e9827a750, 867fda6952, fbc14ac91b, 37924b0ae7, 8b57dd6381, f13863e542, d3acd6b510,
83c73f8458, bf084368c5, d503d4a990, ac17696171, b424cef816, e5ad9c618d, 848d3a6827, 2bd0655fda, f01af24908, 2de45f0d54,
c3a3bad800, 9412c7ff3a, 5f2295043e, 69a80d4a4a, 8db5339c1f, 99c0a9fac5, f4e17bcca6, e713424e5c, 2efbbd7d77, 310abdd543,
5a479c3a08, 783b965deb, 182f8e307c, b460d4a717, 421afe9c30, ca8d930bd6, 940fde0cbf, 78c99636dc, 0c0c417829, 41606fe7d7
.github/workflows/common.yml (vendored, 4 lines changed)

@@ -42,10 +42,6 @@ jobs:
        uses: Swatinem/rust-cache@v2
        with:
          workspaces: "common/libzkp/impl -> target"
      - name: Setup SSH for private repos
        uses: webfactory/ssh-agent@v0.9.0
        with:
          ssh-private-key: ${{ secrets.OPENVM_GPU_SSH_PRIVATE_KEY }}
      - name: Lint
        working-directory: 'common'
        run: |
.github/workflows/docker.yml (vendored, 7 lines changed)

@@ -307,13 +307,6 @@ jobs:
          REPOSITORY: coordinator-api
        run: |
          aws --region ${{ env.AWS_REGION }} ecr describe-repositories --repository-names ${{ env.REPOSITORY }} && : || aws --region ${{ env.AWS_REGION }} ecr create-repository --repository-name ${{ env.REPOSITORY }}
      - name: Setup SSH for private repos
        uses: webfactory/ssh-agent@v0.9.0
        with:
          ssh-private-key: ${{ secrets.OPENVM_GPU_SSH_PRIVATE_KEY }}
      - name: Run custom script
        run: |
          ./build/dockerfiles/coordinator-api/init-openvm.sh
      - name: Build and push
        uses: docker/build-push-action@v3
        env:
.github/workflows/intermediate-docker.yml (vendored, 2 lines changed)

@@ -24,7 +24,6 @@ on:
        options:
          - nightly-2023-12-03
          - nightly-2022-12-10
          - 1.86.0
        default: "nightly-2023-12-03"
      PYTHON_VERSION:
        description: "Python version"

@@ -48,7 +47,6 @@ on:
        type: choice
        options:
          - 0.1.41
          - 0.1.71
      BASE_IMAGE:
        description: "which intermediate image you want to update"
        required: true
@@ -9,10 +9,6 @@ RUN cargo chef prepare --recipe-path recipe.json

FROM chef as zkp-builder
COPY ./common/libzkp/impl/rust-toolchain ./
COPY --from=planner /app/recipe.json recipe.json
# run ./build/dockerfiles/coordinator-api/init-openvm.sh to get openvm-gpu
COPY ./build/dockerfiles/coordinator-api/openvm-gpu /openvm-gpu
COPY ./build/dockerfiles/coordinator-api/gitconfig /root/.gitconfig
COPY ./build/dockerfiles/coordinator-api/config.toml /root/.cargo/config.toml
RUN cargo chef cook --release --recipe-path recipe.json

COPY ./common/libzkp/impl .
@@ -1,24 +0,0 @@ (the Cargo config.toml copied to /root/.cargo/config.toml by the Dockerfile above)
# openvm
# same order and features as zkvm-prover/Cargo.toml.gpu
[patch."ssh://git@github.com/scroll-tech/openvm-gpu.git"]
openvm = { path = "/openvm-gpu/crates/toolchain/openvm", default-features = false }
openvm-algebra-complex-macros = { path = "/openvm-gpu/extensions/algebra/complex-macros", default-features = false }
openvm-algebra-guest = { path = "/openvm-gpu/extensions/algebra/guest", default-features = false }
openvm-bigint-guest = { path = "/openvm-gpu/extensions/bigint/guest", default-features = false }
openvm-build = { path = "/openvm-gpu/crates/toolchain/build", default-features = false }
openvm-circuit = { path = "/openvm-gpu/crates/vm", default-features = false }
openvm-custom-insn = { path = "/openvm-gpu/crates/toolchain/custom_insn", default-features = false }
openvm-continuations = { path = "/openvm-gpu/crates/continuations", default-features = false }
openvm-ecc-guest = { path = "/openvm-gpu/extensions/ecc/guest", default-features = false }
openvm-instructions = { path = "/openvm-gpu/crates/toolchain/instructions", default-features = false }
openvm-keccak256-guest = { path = "/openvm-gpu/extensions/keccak256/guest", default-features = false }
openvm-native-circuit = { path = "/openvm-gpu/extensions/native/circuit", default-features = false }
openvm-native-compiler = { path = "/openvm-gpu/extensions/native/compiler", default-features = false }
openvm-native-recursion = { path = "/openvm-gpu/extensions/native/recursion", default-features = false }
openvm-native-transpiler = { path = "/openvm-gpu/extensions/native/transpiler", default-features = false }
openvm-pairing-guest = { path = "/openvm-gpu/extensions/pairing/guest", default-features = false }
openvm-rv32im-guest = { path = "/openvm-gpu/extensions/rv32im/guest", default-features = false }
openvm-rv32im-transpiler = { path = "/openvm-gpu/extensions/rv32im/transpiler", default-features = false }
openvm-sdk = { path = "/openvm-gpu/crates/sdk", default-features = false, features = ["parallel", "bench-metrics"] }
openvm-sha256-guest = { path = "/openvm-gpu/extensions/sha256/guest", default-features = false }
openvm-transpiler = { path = "/openvm-gpu/crates/toolchain/transpiler", default-features = false }
@@ -1,2 +0,0 @@ (the gitconfig copied to /root/.gitconfig by the Dockerfile above)
[url "https://github.com/"]
	insteadOf = ssh://git@github.com/
@@ -1,12 +0,0 @@ (build/dockerfiles/coordinator-api/init-openvm.sh, referenced by the workflow and Dockerfile above)
#!/bin/bash
set -uex

OPENVM_GPU_COMMIT=dfa10b4

DIR=$(cd "$(dirname "${BASH_SOURCE[0]}")" > /dev/null 2>&1 && pwd)

# checkout openvm-gpu
if [ ! -d $DIR/openvm-gpu ]; then
  git clone git@github.com:scroll-tech/openvm-gpu.git $DIR/openvm-gpu
fi
cd $DIR/openvm-gpu && git fetch && git checkout ${OPENVM_GPU_COMMIT}
common/libzkp/impl/Cargo.lock (generated, 246 lines changed)

The lockfile hunks are uniform: in every affected [[package]] stanza only the version and source lines differ between the two sides of the comparison, and the surrounding name and dependencies lines appear only as unchanged context. One side pins the openvm crates to the private GPU fork:

    version = "1.0.0"
    source = "git+ssh://git@github.com/scroll-tech/openvm-gpu.git?rev=dfa10b4#dfa10b4a06e0b2017b30c4c9dace31c4dd908df4"

and the other side pins them to the upstream repository:

    version = "1.0.0-rc.2"
    source = "git+https://github.com/openvm-org/openvm.git?rev=3c35e9f#3c35e9f369da8dee065a089fb72f3580af7dcaf9"

Packages with this version and source change: openvm, openvm-algebra-circuit, openvm-algebra-guest, openvm-algebra-moduli-macros, openvm-algebra-transpiler, openvm-bigint-circuit, openvm-bigint-guest, openvm-bigint-transpiler, openvm-build, openvm-circuit, openvm-circuit-derive, openvm-circuit-primitives, openvm-circuit-primitives-derive, openvm-continuations, openvm-ecc-circuit, openvm-ecc-guest, openvm-ecc-sw-macros, openvm-ecc-transpiler, openvm-instructions, openvm-instructions-derive, openvm-keccak256-circuit, openvm-keccak256-guest, openvm-keccak256-transpiler, openvm-macros-common, openvm-mod-circuit-builder, openvm-native-circuit, openvm-native-compiler, openvm-native-compiler-derive, openvm-native-recursion, openvm-pairing-circuit, openvm-pairing-guest, openvm-pairing-transpiler, openvm-platform, openvm-poseidon2-air, openvm-rv32-adapters, openvm-rv32im-circuit, openvm-rv32im-guest, openvm-rv32im-transpiler, openvm-sdk, openvm-sha256-air, openvm-sha256-circuit, openvm-sha256-guest, openvm-sha256-transpiler, openvm-transpiler. openvm-algebra-complex-macros and openvm-custom-insn stay at version 0.1.0 and only their source line switches between the same two repositories.

openvm-stark-backend and openvm-stark-sdk switch between

    version = "1.0.0"
    source = "git+https://github.com/openvm-org/stark-backend.git?tag=v1.0.0#884f8e6aabf72bde00dc51f1f1121277bff73b1e"

and

    version = "1.0.0-rc.2"
    source = "git+https://github.com/openvm-org/stark-backend.git?rev=b051e8978da9c829a76b262abf4a9736c8d1681e#b051e8978da9c829a76b262abf4a9736c8d1681e"

The Plonky3 crates p3-air, p3-baby-bear, p3-blake3, p3-bn254-fr, p3-challenger, p3-commit, p3-dft, p3-field, p3-fri, p3-goldilocks, p3-interpolation, p3-keccak, p3-keccak-air, p3-matrix, p3-maybe-rayon, p3-mds, p3-merkle-tree, p3-monty-31, p3-poseidon, p3-poseidon2, p3-poseidon2-air, p3-symmetric, p3-uni-stark and p3-util, all at version 0.1.0, switch between

    source = "git+https://github.com/Plonky3/Plonky3.git?rev=1ba4e5c#1ba4e5c40417f4f7aae86bcca56b6484b4b2490b"

and

    source = "git+https://github.com/Plonky3/Plonky3.git?rev=88d7f05#88d7f059500fd956a7c1eb121e08653e5974728d"

The scroll-tech zkvm-prover crates switch between

    source = "git+https://github.com/scroll-tech/zkvm-prover.git?tag=v0.3.0#fcc09d1517e9d254284820fe66b087331e4b1bf4"

and

    source = "git+https://github.com/scroll-tech/zkvm-prover.git?tag=v0.2.0#5854f8179d14f2afa489a499290e0ff6a953b314"

with scroll-zkvm-circuit-input-types staying at version 0.2.0 on both sides, while scroll-zkvm-prover and scroll-zkvm-verifier move between version 0.3.0 and 0.2.0.
@@ -14,8 +14,8 @@ ruint = { git = "https://github.com/scroll-tech/uint.git", branch = "v1.12.3" }
tiny-keccak = { git = "https://github.com/scroll-tech/tiny-keccak", branch = "scroll-patch-v2.0.2-openvm-v1.0.0-rc.1" }

[dependencies]
euclid_prover = { git = "https://github.com/scroll-tech/zkvm-prover.git", tag = "v0.3.0", package = "scroll-zkvm-prover" }
euclid_verifier = { git = "https://github.com/scroll-tech/zkvm-prover.git", tag = "v0.3.0", package = "scroll-zkvm-verifier" }
euclid_prover = { git = "https://github.com/scroll-tech/zkvm-prover.git", tag = "v0.2.0", package = "scroll-zkvm-prover" }
euclid_verifier = { git = "https://github.com/scroll-tech/zkvm-prover.git", tag = "v0.2.0", package = "scroll-zkvm-verifier" }

base64 = "0.13.0"
env_logger = "0.9.0"
@@ -276,8 +276,8 @@ const (
	SenderTypeFinalizeBatch
	// SenderTypeL1GasOracle indicates a sender from L2 responsible for updating L1 gas prices.
	SenderTypeL1GasOracle
	// SenderTypeL2GasOracleDeprecated indicates a sender from L1 responsible for updating L2 gas prices, which is deprecated.
	SenderTypeL2GasOracleDeprecated
	// SenderTypeL2GasOracle indicates a sender from L1 responsible for updating L2 gas prices.
	SenderTypeL2GasOracle
)

// String returns a string representation of the SenderType.

@@ -289,8 +289,8 @@ func (t SenderType) String() string {
		return "SenderTypeFinalizeBatch"
	case SenderTypeL1GasOracle:
		return "SenderTypeL1GasOracle"
	case SenderTypeL2GasOracleDeprecated:
		return "SenderTypeL2GasOracleDeprecated"
	case SenderTypeL2GasOracle:
		return "SenderTypeL2GasOracle"
	default:
		return fmt.Sprintf("Unknown SenderType (%d)", int32(t))
	}
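The hunks above keep a deprecated constant in the middle of the enum. A minimal sketch of why an iota-based Go enum retains such a placeholder; only the gas-oracle constants appear in the diff, the leading constants and the int32 base type are assumptions for illustration:

	// Sender types are compared and reported as integers (see the int32(t) cast above),
	// so removing SenderTypeL2GasOracleDeprecated would shift every later constant
	// and change the numeric value of anything already stored or logged with it.
	type SenderType int32

	const (
		SenderTypeUnknown SenderType = iota // hypothetical zero value
		SenderTypeCommitBatch               // hypothetical
		SenderTypeFinalizeBatch
		SenderTypeL1GasOracle
		SenderTypeL2GasOracleDeprecated // retired slot, kept so the numbering stays stable
		SenderTypeL2GasOracle           // new entry appended after the retired slot
	)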
@@ -173,9 +173,9 @@ func TestSenderType(t *testing.T) {
			"SenderTypeL1GasOracle",
		},
		{
			"SenderTypeL2GasOracleDeprecated",
			SenderTypeL2GasOracleDeprecated,
			"SenderTypeL2GasOracleDeprecated",
			"SenderTypeL2GasOracle",
			SenderTypeL2GasOracle,
			"SenderTypeL2GasOracle",
		},
		{
			"Invalid Value",
@@ -20,12 +20,9 @@ var (
	}
	// RollupRelayerFlags contains flags only used in rollup-relayer
	RollupRelayerFlags = []cli.Flag{
		&ImportGenesisFlag,
		&MinCodecVersionFlag,
	}
	// ProposerToolFlags contains flags only used in proposer tool
	ProposerToolFlags = []cli.Flag{
		&StartL2BlockFlag,
	}
	// ConfigFileFlag load json type config file.
	ConfigFileFlag = cli.StringFlag{
		Name: "config",

@@ -76,6 +73,12 @@
		Category: "METRICS",
		Value:    6060,
	}
	// ImportGenesisFlag import genesis batch during startup
	ImportGenesisFlag = cli.BoolFlag{
		Name:  "import-genesis",
		Usage: "Import genesis batch into L1 contract during startup",
		Value: false,
	}
	// ServicePortFlag is the port the service will listen on
	ServicePortFlag = cli.IntFlag{
		Name: "service.port",

@@ -94,10 +97,4 @@
		Usage:    "Minimum required codec version for the chunk/batch/bundle proposers",
		Required: true,
	}
	// StartL2BlockFlag indicates the start L2 block number for proposer tool
	StartL2BlockFlag = cli.Uint64Flag{
		Name:  "start-l2-block",
		Usage: "Start L2 block number for proposer tool",
		Value: 0,
	}
)
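A minimal sketch of how a command could read the flags declared above, assuming they are wired through urfave/cli v2's standard accessors; the action function and its output are hypothetical, the flag names come from the diff:

	package main

	import (
		"fmt"

		"github.com/urfave/cli/v2"
	)

	func proposerToolAction(ctx *cli.Context) error {
		// Flag names match the definitions in the diff above.
		startL2Block := ctx.Uint64("start-l2-block") // StartL2BlockFlag
		importGenesis := ctx.Bool("import-genesis")  // ImportGenesisFlag
		fmt.Printf("start-l2-block=%d import-genesis=%v\n", startL2Block, importGenesis)
		return nil
	}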
@@ -5,7 +5,7 @@ import (
	"runtime/debug"
)

var tag = "v4.5.9"
var tag = "v4.5.0"

var commit = func() string {
	if info, ok := debug.ReadBuildInfo(); ok {
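The hunk truncates inside the commit helper. A minimal sketch, not the repository's actual implementation, of how runtime/debug.ReadBuildInfo is typically used to derive a short commit string:

	var commit = func() string {
		if info, ok := debug.ReadBuildInfo(); ok {
			// Build settings carry VCS metadata when the binary is built from a git checkout.
			for _, setting := range info.Settings {
				if setting.Key == "vcs.revision" && len(setting.Value) >= 8 {
					return setting.Value[:8]
				}
			}
		}
		return "unknown"
	}()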
@@ -95,13 +95,13 @@ func (c *CoordinatorApp) MockConfig(store bool) error {
				ParamsPath:       "",
				AssetsPath:       "",
				ForkName:         "darwin",
				MinProverVersion: "v4.4.57",
				MinProverVersion: "v4.2.0",
			},
			HighVersionCircuit: &coordinatorConfig.CircuitConfig{
				ParamsPath:       "",
				AssetsPath:       "",
				ForkName:         "darwinV2",
				MinProverVersion: "v4.4.89",
				MinProverVersion: "v4.3.0",
			},
		},
		BatchCollectionTimeSec: 60,
@@ -18,6 +18,8 @@ require (
	gorm.io/gorm v1.25.7-0.20240204074919-46816ad31dde
)

replace github.com/scroll-tech/da-codec => github.com/scroll-tech/da-codec v0.1.3-0.20250327153440-cd3e5728df9c

require (
	github.com/bytedance/sonic v1.10.1 // indirect
	github.com/cespare/xxhash/v2 v2.2.0 // indirect
@@ -177,8 +177,8 @@ github.com/rogpeppe/go-internal v1.12.0 h1:exVL4IDcn6na9z1rAb56Vxr+CgyK3nn3O+epU
github.com/rogpeppe/go-internal v1.12.0/go.mod h1:E+RYuTGaKKdloAfM02xzb0FW3Paa99yedzYV+kq4uf4=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493 h1:Ioc01J0WEMxuwFvEPGJeBKXdf2KY4Yc3XbFky/IxLlI=
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
github.com/scroll-tech/da-codec v0.1.3-0.20250327153440-cd3e5728df9c h1:MCbuwFynRgxQeoyXwt/wUAPo3vfb61rMWxqADE2he4A=
github.com/scroll-tech/da-codec v0.1.3-0.20250327153440-cd3e5728df9c/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601 h1:NEsjCG6uSvLRBlsP3+x6PL1kM+Ojs3g8UGotIPgJSz8=
github.com/scroll-tech/go-ethereum v1.10.14-0.20250305151038-478940e79601/go.mod h1:OblWe1+QrZwdpwO0j/LY3BSGuKT3YPUFBDQQgvvfStQ=
github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
@@ -75,12 +75,9 @@ func (l *LoginLogic) Check(login *types.LoginParameter) error {
		return errors.New("auth message verify failure")
	}

	// FIXME: for backward compatibility, set prover version as darwin prover version,
	// change v4.4.56 to l.cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion after Euclid upgrade, including the log.
	// hardcode the prover version because l.cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion is used in another check and should be set as v4.4.89 for darwinV2 provers.
	if !version.CheckScrollRepoVersion(login.Message.ProverVersion, "v4.4.56") {
	if !version.CheckScrollRepoVersion(login.Message.ProverVersion, l.cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion) {
		return fmt.Errorf("incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s",
			"v4.4.56", login.Message.ProverVersion)
			l.cfg.ProverManager.Verifier.LowVersionCircuit.MinProverVersion, login.Message.ProverVersion)
	}

	if len(login.Message.ProverTypes) > 0 {

@@ -142,12 +139,6 @@ func (l *LoginLogic) ProverHardForkName(login *types.LoginParameter) (string, er
	}

	proverVersion := proverVersionSplits[0]

	// allowing darwin provers to login, because darwin provers can prove darwinV2 chunk tasks
	if proverVersion == "v4.4.56" {
		return "darwin", nil
	}

	if hardForkNames, ok := l.proverVersionHardForkMap[proverVersion]; ok {
		return strings.Join(hardForkNames, ","), nil
	}
@@ -121,11 +121,6 @@ func (b *BaseProverTask) hardForkSanityCheck(ctx *gin.Context, taskCtx *proverTa
		return "", getHardForkErr
	}

	// for backward compatibility, darwin chunk prover can still prove darwinV2 chunk tasks
	if taskCtx.taskType == message.ProofTypeChunk && hardForkName == "darwinV2" && strings.HasPrefix(taskCtx.ProverVersion, "v4.4.56") {
		return hardForkName, nil
	}

	if _, ok := taskCtx.HardForkNames[hardForkName]; !ok {
		return "", fmt.Errorf("to be assigned prover task's hard-fork name is not the same as prover, proverName: %s, proverVersion: %s, proverSupportHardForkNames: %s, taskHardForkName: %v", taskCtx.ProverName, taskCtx.ProverVersion, taskCtx.HardForkNames, hardForkName)
	}
coordinator/internal/orm/script/main.go (new file, 173 lines)

@@ -0,0 +1,173 @@
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"math/big"
	"os"
	"strconv"
	"strings"

	"github.com/scroll-tech/da-codec/encoding"
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/common/hexutil"
	"github.com/scroll-tech/go-ethereum/log"
	"gorm.io/gorm"

	"scroll-tech/common/database"
	"scroll-tech/common/types/message"
	"scroll-tech/coordinator/internal/orm"
)

// main reads a batch index range ("start-end") from os.Args[1], connects to the
// Postgres database given by the DB_DSN environment variable, and writes one
// batch_task_<index>.json file per batch in the range.
func main() {
	glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
	glogger.Verbosity(log.LvlInfo)
	log.Root().SetHandler(glogger)

	if len(os.Args) < 2 {
		log.Crit("no batch index range provided")
		return
	}

	indexRange := os.Args[1]
	indices := strings.Split(indexRange, "-")
	if len(indices) != 2 {
		log.Crit("invalid batch index range format. Use start-end", "providedRange", indexRange)
		return
	}

	startIndex, err := strconv.Atoi(indices[0])
	endIndex, err2 := strconv.Atoi(indices[1])
	if err != nil || err2 != nil || startIndex > endIndex {
		log.Crit("invalid batch index range", "start", indices[0], "end", indices[1], "err", err, "err2", err2)
		return
	}

	db, err := database.InitDB(&database.Config{
		DriverName: "postgres",
		DSN:        os.Getenv("DB_DSN"),
		MaxOpenNum: 200,
		MaxIdleNum: 20,
	})
	if err != nil {
		log.Crit("failed to init db", "err", err)
	}
	defer func() {
		if deferErr := database.CloseDB(db); deferErr != nil {
			log.Error("failed to close db", "err", err)
		}
	}()

	for i := startIndex; i <= endIndex; i++ {
		batchIndex := uint64(i)
		resultBytes, err := getBatchTask(db, batchIndex)
		if err != nil {
			log.Crit("failed to get batch task", "batchIndex", batchIndex, "err", err)
			continue
		}

		outputFilename := fmt.Sprintf("batch_task_%d.json", batchIndex)
		if err = os.WriteFile(outputFilename, resultBytes, 0644); err != nil {
			log.Crit("failed to write output file", "filename", outputFilename, "err", err)
		}
	}
}

// getBatchTask assembles the proving task payload for one batch from its chunks
// and their stored chunk proofs, and returns it as indented JSON.
func getBatchTask(db *gorm.DB, batchIndex uint64) ([]byte, error) {
	batch, err := orm.NewBatch(db).GetBatchByIndex(context.Background(), batchIndex)
	if err != nil {
		err = fmt.Errorf("failed to get batch hash by index: %d err: %w ", batchIndex, err)
		return nil, err
	}

	chunks, err := orm.NewChunk(db).GetChunksByBatchHash(context.Background(), batch.Hash)
	if err != nil {
		err = fmt.Errorf("failed to get chunk proofs for batch task id: %s err: %w ", batch.Hash, err)
		return nil, err
	}

	var chunkProofs []message.ChunkProof
	var chunkInfos []*message.ChunkInfo
	for _, chunk := range chunks {
		fmt.Println("chunk index: ", chunk.Index)
		fmt.Print("chunk proof: ", chunk.Proof)
		proof := message.NewChunkProof("euclid")
		if encodeErr := json.Unmarshal(chunk.Proof, &proof); encodeErr != nil {
			return nil, fmt.Errorf("Chunk.GetProofsByBatchHash unmarshal proof error: %w, batch hash: %v, chunk hash: %v", encodeErr, batch.Hash, chunk.Hash)
		}
		chunkProofs = append(chunkProofs, proof)

		chunkInfo := message.ChunkInfo{
			ChainID:          534351,
			PrevStateRoot:    common.HexToHash(chunk.ParentChunkStateRoot),
			PostStateRoot:    common.HexToHash(chunk.StateRoot),
			WithdrawRoot:     common.HexToHash(chunk.WithdrawRoot),
			DataHash:         common.HexToHash(chunk.Hash),
			PrevMsgQueueHash: common.HexToHash(chunk.PrevL1MessageQueueHash),
			PostMsgQueueHash: common.HexToHash(chunk.PostL1MessageQueueHash),
			IsPadding:        false,
		}
		if openvmProof, ok := proof.(*message.OpenVMChunkProof); ok {
			chunkInfo.InitialBlockNumber = openvmProof.MetaData.ChunkInfo.InitialBlockNumber
			chunkInfo.BlockCtxs = openvmProof.MetaData.ChunkInfo.BlockCtxs
			chunkInfo.TxDataLength = openvmProof.MetaData.ChunkInfo.TxDataLength
		}
		chunkInfos = append(chunkInfos, &chunkInfo)
	}

	taskDetail, err := getBatchTaskDetail(batch, chunkInfos, chunkProofs)
	if err != nil {
		return nil, fmt.Errorf("failed to get batch task detail, taskID:%s err:%w", batch.Hash, err)
	}

	chunkProofsBytes, err := json.MarshalIndent(taskDetail, "", " ")
	if err != nil {
		return nil, fmt.Errorf("failed to marshal chunk proofs, taskID:%s err:%w", batch.Hash, err)
	}

	return chunkProofsBytes, nil
}

// getBatchTaskDetail fills codec, fork-name, batch-header and KZG blob-proof
// fields for batches whose codec version passes the switch below.
func getBatchTaskDetail(dbBatch *orm.Batch, chunkInfos []*message.ChunkInfo, chunkProofs []message.ChunkProof) (*message.BatchTaskDetail, error) {
	taskDetail := &message.BatchTaskDetail{
		ChunkInfos:  chunkInfos,
		ChunkProofs: chunkProofs,
	}

	dbBatchCodecVersion := encoding.CodecVersion(dbBatch.CodecVersion)
	switch dbBatchCodecVersion {
	case encoding.CodecV3, encoding.CodecV4, encoding.CodecV6:
	default:
		return taskDetail, nil
	}

	if dbBatchCodecVersion >= encoding.CodecV7 {
		taskDetail.ForkName = message.EuclidV2ForkNameForProver
	} else {
		taskDetail.ForkName = message.EuclidForkNameForProver
	}

	codec, err := encoding.CodecFromVersion(encoding.CodecVersion(dbBatch.CodecVersion))
	if err != nil {
		return nil, fmt.Errorf("failed to get codec from version %d, err: %w", dbBatch.CodecVersion, err)
	}

	batchHeader, decodeErr := codec.NewDABatchFromBytes(dbBatch.BatchHeader)
	if decodeErr != nil {
		return nil, fmt.Errorf("failed to decode batch header version %d: %w", dbBatch.CodecVersion, decodeErr)
	}
	taskDetail.BatchHeader = batchHeader
	taskDetail.BlobBytes = dbBatch.BlobBytes

	challengeDigest, kzgCommitment, kzgProof, err := codec.BlobDataProofFromBlobBytes(dbBatch.BlobBytes)
	if err != nil {
		return nil, fmt.Errorf("failed to get challenge digest from blob bytes, taskID: %s, err: %w", dbBatch.Hash, err)
	}

	taskDetail.ChallengeDigest = challengeDigest
	taskDetail.KzgProof = message.Byte48{Big: hexutil.Big(*new(big.Int).SetBytes(kzgProof[:]))}
	taskDetail.KzgCommitment = message.Byte48{Big: hexutil.Big(*new(big.Int).SetBytes(kzgCommitment[:]))}

	return taskDetail, nil
}
@@ -89,13 +89,13 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
|
||||
ParamsPath: "",
|
||||
AssetsPath: "",
|
||||
ForkName: "homestead",
|
||||
MinProverVersion: "v4.4.57",
|
||||
MinProverVersion: "v4.2.0",
|
||||
},
|
||||
HighVersionCircuit: &config.CircuitConfig{
|
||||
ParamsPath: "",
|
||||
AssetsPath: "",
|
||||
ForkName: "bernoulli",
|
||||
MinProverVersion: "v4.4.89",
|
||||
MinProverVersion: "v4.3.0",
|
||||
},
|
||||
},
|
||||
BatchCollectionTimeSec: 10,
|
||||
@@ -142,7 +142,7 @@ func setupCoordinator(t *testing.T, proversPerSession uint8, coordinatorURL stri
|
||||
func setEnv(t *testing.T) {
|
||||
var err error
|
||||
|
||||
version.Version = "v4.4.57"
|
||||
version.Version = "v4.2.0"
|
||||
|
||||
glogger := log.NewGlogHandler(log.StreamHandler(os.Stderr, log.LogfmtFormat()))
|
||||
glogger.Verbosity(log.LvlInfo)
|
||||
@@ -285,12 +285,14 @@ func testOutdatedProverVersion(t *testing.T) {
|
||||
batchProver := newMockProver(t, "prover_batch_test", coordinatorURL, message.ProofTypeBatch, "v1.999.999")
|
||||
assert.True(t, chunkProver.healthCheckSuccess(t))
|
||||
|
||||
expectedErr := fmt.Errorf("check the login parameter failure: incompatible prover version. please upgrade your prover, minimum allowed version: v4.4.56, actual version: %s", chunkProver.proverVersion)
|
||||
expectedErr := fmt.Errorf("check the login parameter failure: incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s",
|
||||
conf.ProverManager.Verifier.LowVersionCircuit.MinProverVersion, chunkProver.proverVersion)
|
||||
code, errMsg := chunkProver.tryGetProverTask(t, message.ProofTypeChunk)
|
||||
assert.Equal(t, types.ErrJWTCommonErr, code)
|
||||
assert.Equal(t, expectedErr, errors.New(errMsg))
|
||||
|
||||
expectedErr = fmt.Errorf("check the login parameter failure: incompatible prover version. please upgrade your prover, minimum allowed version: v4.4.56, actual version: %s", batchProver.proverVersion)
|
||||
expectedErr = fmt.Errorf("check the login parameter failure: incompatible prover version. please upgrade your prover, minimum allowed version: %s, actual version: %s",
|
||||
conf.ProverManager.Verifier.LowVersionCircuit.MinProverVersion, batchProver.proverVersion)
|
||||
code, errMsg = batchProver.tryGetProverTask(t, message.ProofTypeBatch)
|
||||
assert.Equal(t, types.ErrJWTCommonErr, code)
|
||||
assert.Equal(t, expectedErr, errors.New(errMsg))
|
||||
|
||||
@@ -1357,8 +1357,7 @@ github.com/scroll-tech/da-codec v0.1.1-0.20241014152913-2703f226fb0b h1:5H6V6yba
|
||||
github.com/scroll-tech/da-codec v0.1.1-0.20241014152913-2703f226fb0b/go.mod h1:48uxaqVgpD8ulH8p+nrBtfeLHZ9tX82bVVdPNkW3rPE=
|
||||
github.com/scroll-tech/da-codec v0.1.3-0.20250227072756-a1482833595f h1:YYbhuUwjowqI4oyXtECRofck7Fyj18e1tcRjuQlZpJE=
|
||||
github.com/scroll-tech/da-codec v0.1.3-0.20250227072756-a1482833595f/go.mod h1:xECEHZLVzbdUn+tNbRJhRIjLGTOTmnFQuTgUTeVLX58=
|
||||
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493 h1:Ioc01J0WEMxuwFvEPGJeBKXdf2KY4Yc3XbFky/IxLlI=
|
||||
github.com/scroll-tech/da-codec v0.1.3-0.20250401062930-9f9f53898493/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
|
||||
github.com/scroll-tech/da-codec v0.1.3-0.20250327153440-cd3e5728df9c/go.mod h1:yhTS9OVC0xQGhg7DN5iV5KZJvnSIlFWAxDdp+6jxQtY=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240607130425-e2becce6a1a4/go.mod h1:byf/mZ8jLYUCnUePTicjJWn+RvKdxDn7buS6glTnMwQ=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20240821074444-b3fa00861e5e/go.mod h1:swB5NSp8pKNDuYsTxfR08bHS6L56i119PBx8fxvV8Cs=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20241010064814-3d88e870ae22/go.mod h1:r9FwtxCtybMkTbWYCyBuevT9TW3zHmOTHqD082Uh+Oo=
|
||||
|
||||
@@ -33,45 +33,3 @@ make rollup_bins
|
||||
./build/bin/gas_oracle --config ./conf/config.json
|
||||
./build/bin/rollup_relayer --config ./conf/config.json
|
||||
```
|
||||
|
||||
## Proposer Tool
|
||||
|
||||
The Proposer Tool replays historical blocks with custom configurations (e.g., future hardfork configs, custom chunk/batch/bundle proposer configs) to generate chunks/batches/bundles, helping test parameter changes before protocol upgrade.
|
||||
|
||||
You can:
|
||||
|
||||
1. Enable different hardforks in the genesis configuration.
|
||||
2. Set custom chunk-proposer, batch-proposer, and bundle-proposer parameters.
|
||||
3. Analyze resulting metrics (blob size, block count, transaction count, gas usage).
|
||||
|
||||
## How to run the proposer tool?
|
||||
|
||||
### Set the configs
|
||||
|
||||
1. Set genesis config to enable desired hardforks in [`proposer-tool-genesis.json`](./proposer-tool-genesis.json).
|
||||
2. Set proposer config in [`proposer-tool-config.json`](./proposer-tool-config.json) for data analysis.
|
||||
3. Set `start-l2-block` in the launch command of proposer-tool in [`docker-compose-proposer-tool.yml`](./docker-compose-proposer-tool.yml) to the block number you want to start from. The default is `0`, which means starting from the genesis block.
|
||||
|
||||
### Start the proposer tool using docker-compose
|
||||
|
||||
Prerequisite: an RPC URL to an archive L2 node. The default url in [`proposer-tool-config.json`](./proposer-tool-config.json) is `https://rpc.scroll.io`.
|
||||
|
||||
```
|
||||
cd rollup
|
||||
DOCKER_BUILDKIT=1 docker-compose -f docker-compose-proposer-tool.yml up -d
|
||||
```
|
||||
|
||||
> Note: The port 5432 of database is mapped to the host machine. You can use `psql` or any db clients to connect to the database.
|
||||
|
||||
> The DSN for the database is `postgres://postgres:postgres@db:5432/scroll?sslmode=disable`.
|
||||
|
||||
|
||||
### Reset env
|
||||
```
|
||||
docker-compose -f docker-compose-proposer-tool.yml down -v
|
||||
```
|
||||
|
||||
If you need to rebuild the images, removing the old images is necessary. You can do this by running the following command:
|
||||
```
|
||||
docker images | grep rollup | awk '{print $3}' | xargs docker rmi -f
|
||||
```
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/scroll-tech/go-ethereum/params"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
"github.com/urfave/cli/v2"
|
||||
|
||||
@@ -71,12 +72,22 @@ func action(ctx *cli.Context) error {
|
||||
log.Crit("failed to connect l1 geth", "config file", cfgFile, "error", err)
|
||||
}
|
||||
|
||||
// Init l2geth connection
|
||||
l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
|
||||
if err != nil {
|
||||
log.Crit("failed to connect l2 geth", "config file", cfgFile, "error", err)
|
||||
}
|
||||
|
||||
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, db, registry)
|
||||
|
||||
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig, relayer.ServiceTypeL1GasOracle, registry)
|
||||
if err != nil {
|
||||
log.Crit("failed to create new l1 relayer", "config file", cfgFile, "error", err)
|
||||
}
|
||||
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, ¶ms.ChainConfig{}, false /* initGenesis */, relayer.ServiceTypeL2GasOracle, registry)
|
||||
if err != nil {
|
||||
log.Crit("failed to create new l2 relayer", "config file", cfgFile, "error", err)
|
||||
}
|
||||
// Start l1 watcher process
|
||||
go utils.LoopWithContext(subCtx, 10*time.Second, func(ctx context.Context) {
|
||||
// Fetch the latest block number to decrease the delay when fetching gas prices
|
||||
@@ -95,6 +106,7 @@ func action(ctx *cli.Context) error {
|
||||
|
||||
// Start l1relayer process
|
||||
go utils.Loop(subCtx, 10*time.Second, l1relayer.ProcessGasPriceOracle)
|
||||
go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessGasPriceOracle)
|
||||
|
||||
// Finish start all message relayer functions
|
||||
log.Info("Start gas-oracle successfully", "version", version.Version)
|
||||
|
||||
@@ -1,93 +0,0 @@
|
||||
package app
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/signal"
|
||||
|
||||
"github.com/scroll-tech/da-codec/encoding"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/urfave/cli/v2"
|
||||
|
||||
"scroll-tech/common/utils"
|
||||
"scroll-tech/common/version"
|
||||
|
||||
"scroll-tech/rollup/internal/config"
|
||||
"scroll-tech/rollup/internal/controller/watcher"
|
||||
)
|
||||
|
||||
var app *cli.App
|
||||
|
||||
func init() {
|
||||
// Set up proposer-tool app info.
|
||||
app = cli.NewApp()
|
||||
app.Action = action
|
||||
app.Name = "proposer-tool"
|
||||
app.Usage = "The Scroll Proposer Tool"
|
||||
app.Version = version.Version
|
||||
app.Flags = append(app.Flags, utils.CommonFlags...)
|
||||
app.Flags = append(app.Flags, utils.RollupRelayerFlags...)
|
||||
app.Flags = append(app.Flags, utils.ProposerToolFlags...)
|
||||
app.Commands = []*cli.Command{}
|
||||
app.Before = func(ctx *cli.Context) error {
|
||||
return utils.LogSetup(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
func action(ctx *cli.Context) error {
|
||||
// Load config file.
|
||||
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
|
||||
cfg, err := config.NewConfigForReplay(cfgFile)
|
||||
if err != nil {
|
||||
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
|
||||
}
|
||||
|
||||
subCtx, cancel := context.WithCancel(ctx.Context)
|
||||
|
||||
startL2BlockHeight := ctx.Uint64(utils.StartL2BlockFlag.Name)
|
||||
|
||||
genesisPath := ctx.String(utils.Genesis.Name)
|
||||
genesis, err := utils.ReadGenesis(genesisPath)
|
||||
if err != nil {
|
||||
log.Crit("failed to read genesis", "genesis file", genesisPath, "error", err)
|
||||
}
|
||||
|
||||
minCodecVersion := encoding.CodecVersion(ctx.Uint(utils.MinCodecVersionFlag.Name))
|
||||
|
||||
// sanity check config
|
||||
if cfg.L2Config.BatchProposerConfig.MaxChunksPerBatch <= 0 {
|
||||
log.Crit("cfg.L2Config.BatchProposerConfig.MaxChunksPerBatch must be greater than 0")
|
||||
}
|
||||
if cfg.L2Config.ChunkProposerConfig.MaxL2GasPerChunk <= 0 {
|
||||
log.Crit("cfg.L2Config.ChunkProposerConfig.MaxL2GasPerChunk must be greater than 0")
|
||||
}
|
||||
|
||||
proposerTool, err := watcher.NewProposerTool(subCtx, cancel, cfg, startL2BlockHeight, minCodecVersion, genesis.Config)
|
||||
if err != nil {
|
||||
log.Crit("failed to create proposer tool", "startL2BlockHeight", startL2BlockHeight, "minCodecVersion", minCodecVersion, "error", err)
|
||||
}
|
||||
proposerTool.Start()
|
||||
|
||||
log.Info("Start proposer-tool successfully", "version", version.Version)
|
||||
|
||||
// Catch CTRL-C to ensure a graceful shutdown.
|
||||
interrupt := make(chan os.Signal, 1)
|
||||
signal.Notify(interrupt, os.Interrupt)
|
||||
|
||||
// Wait until the interrupt signal is received from an OS signal.
|
||||
<-interrupt
|
||||
|
||||
cancel()
|
||||
proposerTool.Stop()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Run proposer tool cmd instance.
|
||||
func Run() {
|
||||
if err := app.Run(os.Args); err != nil {
|
||||
_, _ = fmt.Fprintln(os.Stderr, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
@@ -1,7 +0,0 @@
|
||||
package main
|
||||
|
||||
import "scroll-tech/rollup/cmd/proposer_tool/app"
|
||||
|
||||
func main() {
|
||||
app.Run()
|
||||
}
|
||||
@@ -79,6 +79,8 @@ func action(ctx *cli.Context) error {
|
||||
log.Crit("failed to read genesis", "genesis file", genesisPath, "error", err)
|
||||
}
|
||||
|
||||
initGenesis := ctx.Bool(utils.ImportGenesisFlag.Name)
|
||||
|
||||
// sanity check config
|
||||
if cfg.L2Config.RelayerConfig.BatchSubmission == nil {
|
||||
log.Crit("cfg.L2Config.RelayerConfig.BatchSubmission must not be nil")
|
||||
@@ -96,7 +98,7 @@ func action(ctx *cli.Context) error {
|
||||
log.Crit("cfg.L2Config.ChunkProposerConfig.MaxL2GasPerChunk must be greater than 0")
|
||||
}
|
||||
|
||||
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, genesis.Config, relayer.ServiceTypeL2RollupRelayer, registry)
|
||||
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, genesis.Config, initGenesis, relayer.ServiceTypeL2RollupRelayer, registry)
|
||||
if err != nil {
|
||||
log.Crit("failed to create l2 relayer", "config file", cfgFile, "error", err)
|
||||
}
|
||||
|
||||
@@ -1,40 +0,0 @@
|
||||
version: '3'
|
||||
|
||||
services:
|
||||
db:
|
||||
image: postgres:14
|
||||
environment:
|
||||
- POSTGRES_USER=postgres
|
||||
- POSTGRES_PASSWORD=postgres
|
||||
- POSTGRES_DB=scroll
|
||||
ports:
|
||||
- "5432:5432"
|
||||
volumes:
|
||||
- postgres_data:/var/lib/postgresql/data
|
||||
healthcheck:
|
||||
test: ["CMD-SHELL", "pg_isready -U postgres"]
|
||||
interval: 5s
|
||||
timeout: 5s
|
||||
retries: 5
|
||||
|
||||
proposer-tool:
|
||||
build:
|
||||
context: ..
|
||||
dockerfile: ./rollup/proposer_tool.Dockerfile
|
||||
depends_on:
|
||||
db:
|
||||
condition: service_healthy
|
||||
command: [
|
||||
"--config", "/app/conf/proposer-tool-config.json",
|
||||
"--genesis", "/app/conf/proposer-tool-genesis.json",
|
||||
"--min-codec-version", "4",
|
||||
"--start-l2-block", "10000",
|
||||
"--log.debug", "--verbosity", "3"
|
||||
]
|
||||
volumes:
|
||||
- ./proposer-tool-config.json:/app/conf/proposer-tool-config.json
|
||||
- ./proposer-tool-genesis.json:/app/conf/proposer-tool-genesis.json
|
||||
restart: unless-stopped
|
||||
|
||||
volumes:
|
||||
postgres_data:
|
||||
@@ -1,10 +1,7 @@
|
||||
package config
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
@@ -23,11 +20,6 @@ type Config struct {
|
||||
DBConfig *database.Config `json:"db_config"`
|
||||
}
|
||||
|
||||
type ConfigForReplay struct {
|
||||
Config
|
||||
DBConfigForReplay *database.Config `json:"db_config_for_replay"`
|
||||
}
|
||||
|
||||
// NewConfig returns a new instance of Config.
|
||||
func NewConfig(file string) (*Config, error) {
|
||||
v := viper.New()
|
||||
@@ -95,19 +87,3 @@ func NewConfig(file string) (*Config, error) {
|
||||
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
// NewConfigForReplay returns a new instance of ConfigForReplay.
|
||||
func NewConfigForReplay(file string) (*ConfigForReplay, error) {
|
||||
buf, err := os.ReadFile(filepath.Clean(file))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cfg := &ConfigForReplay{}
|
||||
err = json.Unmarshal(buf, cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
@@ -25,6 +25,6 @@ const (
|
||||
ServiceTypeL2RollupRelayer
|
||||
// ServiceTypeL1GasOracle indicates the service is a Layer 1 gas oracle.
|
||||
ServiceTypeL1GasOracle
|
||||
// ServiceTypeL2GasOracleDeprecated indicates the service is a Layer 2 gas oracle, which is deprecated.
|
||||
ServiceTypeL2GasOracleDeprecated
|
||||
// ServiceTypeL2GasOracle indicates the service is a Layer 2 gas oracle.
|
||||
ServiceTypeL2GasOracle
|
||||
)
|
||||
|
||||
@@ -167,7 +167,6 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
|
||||
if r.lastBaseFee == r.cfg.GasOracleConfig.L1BaseFeeDefault && r.lastBlobBaseFee == r.cfg.GasOracleConfig.L1BlobBaseFeeDefault {
|
||||
return
|
||||
}
|
||||
log.Warn("The committing batch has been stuck for a long time, it's likely that the L1 gas fee spiked, set fees to default values", "currentBaseFee", baseFee, "currentBlobBaseFee", blobBaseFee, "threshold (min)", r.cfg.GasOracleConfig.L1BlobBaseFeeThreshold, "defaultBaseFee", r.cfg.GasOracleConfig.L1BaseFeeDefault, "defaultBlobBaseFee", r.cfg.GasOracleConfig.L1BlobBaseFeeDefault)
|
||||
baseFee = r.cfg.GasOracleConfig.L1BaseFeeDefault
|
||||
blobBaseFee = r.cfg.GasOracleConfig.L1BlobBaseFeeDefault
|
||||
} else if err != nil {
|
||||
@@ -251,11 +250,7 @@ func (r *Layer1Relayer) shouldUpdateGasOracle(baseFee uint64, blobBaseFee uint64
|
||||
return true
|
||||
}
|
||||
|
||||
expectedBaseFeeDelta := r.lastBaseFee * r.gasPriceDiff / gasPriceDiffPrecision
|
||||
// Allowing a minimum of 0 wei if the gas price diff config is 0, this will be used to let the gas oracle send transactions continuously.
|
||||
if r.gasPriceDiff > 0 {
|
||||
expectedBaseFeeDelta += 1
|
||||
}
|
||||
expectedBaseFeeDelta := r.lastBaseFee*r.gasPriceDiff/gasPriceDiffPrecision + 1
|
||||
if baseFee >= r.minGasPrice && math.Abs(float64(baseFee)-float64(r.lastBaseFee)) >= float64(expectedBaseFeeDelta) {
|
||||
return true
|
||||
}
|
||||
@@ -283,7 +278,5 @@ func (r *Layer1Relayer) commitBatchReachTimeout() (bool, error) {
|
||||
}
|
||||
// len(batches) == 0 probably shouldn't ever happen, but need to check this
|
||||
// Also, we should check if it's a genesis batch. If so, skip the timeout check.
|
||||
// If finalizing/finalized status is updated before committed status, skip the timeout check of this round.
|
||||
// Because batches[0].CommittedAt is nil in this case, this will only continue for a short time window.
|
||||
return len(batches) == 0 || (batches[0].Index != 0 && batches[0].CommittedAt != nil && utils.NowUTC().Sub(*batches[0].CommittedAt) > time.Duration(r.cfg.GasOracleConfig.CheckCommittedBatchesWindowMinutes)*time.Minute), nil
|
||||
return len(batches) == 0 || (batches[0].Index != 0 && utils.NowUTC().Sub(*batches[0].CommittedAt) > time.Duration(r.cfg.GasOracleConfig.CheckCommittedBatchesWindowMinutes)*time.Minute), nil
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
"math/big"
|
||||
"sort"
|
||||
"strings"
|
||||
@@ -53,7 +54,12 @@ type Layer2Relayer struct {
|
||||
finalizeSender *sender.Sender
|
||||
l1RollupABI *abi.ABI
|
||||
|
||||
l2GasOracleABI *abi.ABI
|
||||
gasOracleSender *sender.Sender
|
||||
l2GasOracleABI *abi.ABI
|
||||
|
||||
lastGasPrice uint64
|
||||
minGasPrice uint64
|
||||
gasPriceDiff uint64
|
||||
|
||||
// Used to get batch status from chain_monitor api.
|
||||
chainMonitorClient *resty.Client
|
||||
@@ -64,10 +70,22 @@ type Layer2Relayer struct {
|
||||
}
|
||||
|
||||
// NewLayer2Relayer will return a new instance of Layer2RelayerClient
|
||||
func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.DB, cfg *config.RelayerConfig, chainCfg *params.ChainConfig, serviceType ServiceType, reg prometheus.Registerer) (*Layer2Relayer, error) {
|
||||
var commitSender, finalizeSender *sender.Sender
|
||||
func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.DB, cfg *config.RelayerConfig, chainCfg *params.ChainConfig, initGenesis bool, serviceType ServiceType, reg prometheus.Registerer) (*Layer2Relayer, error) {
|
||||
var gasOracleSender, commitSender, finalizeSender *sender.Sender
|
||||
var err error
|
||||
|
||||
switch serviceType {
|
||||
case ServiceTypeL2GasOracle:
|
||||
gasOracleSender, err = sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderSignerConfig, "l2_relayer", "gas_oracle_sender", types.SenderTypeL2GasOracle, db, reg)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("new gas oracle sender failed, err: %w", err)
|
||||
}
|
||||
|
||||
// Ensure test features aren't enabled on the ethereum mainnet.
|
||||
if gasOracleSender.GetChainID().Cmp(big.NewInt(1)) == 0 && cfg.EnableTestEnvBypassFeatures {
|
||||
return nil, errors.New("cannot enable test env features in mainnet")
|
||||
}
|
||||
|
||||
case ServiceTypeL2RollupRelayer:
|
||||
commitSenderAddr, err := addrFromSignerConfig(cfg.CommitSenderSignerConfig)
|
||||
if err != nil {
|
||||
@@ -100,6 +118,16 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
|
||||
return nil, fmt.Errorf("invalid service type for l2_relayer: %v", serviceType)
|
||||
}
|
||||
|
||||
var minGasPrice uint64
|
||||
var gasPriceDiff uint64
|
||||
if cfg.GasOracleConfig != nil {
|
||||
minGasPrice = cfg.GasOracleConfig.MinGasPrice
|
||||
gasPriceDiff = cfg.GasOracleConfig.GasPriceDiff
|
||||
} else {
|
||||
minGasPrice = 0
|
||||
gasPriceDiff = defaultGasPriceDiff
|
||||
}
|
||||
|
||||
layer2Relayer := &Layer2Relayer{
|
||||
ctx: ctx,
|
||||
db: db,
|
||||
@@ -115,7 +143,11 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
|
||||
finalizeSender: finalizeSender,
|
||||
l1RollupABI: bridgeAbi.ScrollChainABI,
|
||||
|
||||
l2GasOracleABI: bridgeAbi.L2GasPriceOracleABI,
|
||||
gasOracleSender: gasOracleSender,
|
||||
l2GasOracleABI: bridgeAbi.L2GasPriceOracleABI,
|
||||
|
||||
minGasPrice: minGasPrice,
|
||||
gasPriceDiff: gasPriceDiff,
|
||||
|
||||
cfg: cfg,
|
||||
chainCfg: chainCfg,
|
||||
@@ -129,12 +161,16 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
|
||||
}
|
||||
|
||||
// Initialize genesis before we do anything else
|
||||
if err := layer2Relayer.initializeGenesis(); err != nil {
|
||||
return nil, fmt.Errorf("failed to initialize and commit genesis batch, err: %v", err)
|
||||
if initGenesis {
|
||||
if err := layer2Relayer.initializeGenesis(); err != nil {
|
||||
return nil, fmt.Errorf("failed to initialize and commit genesis batch, err: %v", err)
|
||||
}
|
||||
}
|
||||
layer2Relayer.metrics = initL2RelayerMetrics(reg)
|
||||
|
||||
switch serviceType {
|
||||
case ServiceTypeL2GasOracle:
|
||||
go layer2Relayer.handleL2GasOracleConfirmLoop(ctx)
|
||||
case ServiceTypeL2RollupRelayer:
|
||||
go layer2Relayer.handleL2RollupRelayerConfirmLoop(ctx)
|
||||
default:
|
||||
@@ -265,6 +301,80 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
|
||||
}
|
||||
}
|
||||
|
||||
// ProcessGasPriceOracle imports gas price to layer1
|
||||
func (r *Layer2Relayer) ProcessGasPriceOracle() {
|
||||
r.metrics.rollupL2RelayerGasPriceOraclerRunTotal.Inc()
|
||||
batch, err := r.batchOrm.GetLatestBatch(r.ctx)
|
||||
if err != nil {
|
||||
log.Error("Failed to GetLatestBatch", "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
if types.GasOracleStatus(batch.OracleStatus) == types.GasOraclePending {
|
||||
suggestGasPrice, err := r.l2Client.SuggestGasPrice(r.ctx)
|
||||
if err != nil {
|
||||
log.Error("Failed to fetch SuggestGasPrice from l2geth", "err", err)
|
||||
return
|
||||
}
|
||||
suggestGasPriceUint64 := uint64(suggestGasPrice.Int64())
|
||||
|
||||
// include the token exchange rate in the fee data if alternative gas token enabled
|
||||
if r.cfg.GasOracleConfig.AlternativeGasTokenConfig != nil && r.cfg.GasOracleConfig.AlternativeGasTokenConfig.Enabled {
|
||||
// The exchange rate represent the number of native token on L1 required to exchange for 1 native token on L2.
|
||||
var exchangeRate float64
|
||||
switch r.cfg.GasOracleConfig.AlternativeGasTokenConfig.Mode {
|
||||
case "Fixed":
|
||||
exchangeRate = r.cfg.GasOracleConfig.AlternativeGasTokenConfig.FixedExchangeRate
|
||||
case "BinanceApi":
|
||||
exchangeRate, err = rutils.GetExchangeRateFromBinanceApi(r.cfg.GasOracleConfig.AlternativeGasTokenConfig.TokenSymbolPair, 5)
|
||||
if err != nil {
|
||||
log.Error("Failed to get gas token exchange rate from Binance api", "tokenSymbolPair", r.cfg.GasOracleConfig.AlternativeGasTokenConfig.TokenSymbolPair, "err", err)
|
||||
return
|
||||
}
|
||||
default:
|
||||
log.Error("Invalid alternative gas token mode", "mode", r.cfg.GasOracleConfig.AlternativeGasTokenConfig.Mode)
|
||||
return
|
||||
}
|
||||
if exchangeRate == 0 {
|
||||
log.Error("Invalid exchange rate", "exchangeRate", exchangeRate)
|
||||
return
|
||||
}
|
||||
suggestGasPriceUint64 = uint64(math.Ceil(float64(suggestGasPriceUint64) * exchangeRate))
|
||||
suggestGasPrice = new(big.Int).SetUint64(suggestGasPriceUint64)
|
||||
}
|
||||
|
||||
expectedDelta := r.lastGasPrice * r.gasPriceDiff / gasPriceDiffPrecision
|
||||
if r.lastGasPrice > 0 && expectedDelta == 0 {
|
||||
expectedDelta = 1
|
||||
}
|
||||
|
||||
// last is undefined or (suggestGasPriceUint64 >= minGasPrice && exceed diff)
|
||||
if r.lastGasPrice == 0 || (suggestGasPriceUint64 >= r.minGasPrice &&
|
||||
(math.Abs(float64(suggestGasPriceUint64)-float64(r.lastGasPrice)) >= float64(expectedDelta))) {
|
||||
data, err := r.l2GasOracleABI.Pack("setL2BaseFee", suggestGasPrice)
|
||||
if err != nil {
|
||||
log.Error("Failed to pack setL2BaseFee", "batch.Hash", batch.Hash, "GasPrice", suggestGasPrice.Uint64(), "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
hash, err := r.gasOracleSender.SendTransaction(batch.Hash, &r.cfg.GasPriceOracleContractAddress, data, nil, 0)
|
||||
if err != nil {
|
||||
log.Error("Failed to send setL2BaseFee tx to layer2 ", "batch.Hash", batch.Hash, "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
err = r.batchOrm.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, batch.Hash, types.GasOracleImporting, hash.String())
|
||||
if err != nil {
|
||||
log.Error("UpdateGasOracleStatusAndOracleTxHash failed", "batch.Hash", batch.Hash, "err", err)
|
||||
return
|
||||
}
|
||||
r.lastGasPrice = suggestGasPriceUint64
|
||||
r.metrics.rollupL2RelayerLastGasPrice.Set(float64(r.lastGasPrice))
|
||||
log.Info("Update l2 gas price", "txHash", hash.String(), "GasPrice", suggestGasPrice)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ProcessPendingBatches processes the pending batches by sending commitBatch transactions to layer 1.
|
||||
func (r *Layer2Relayer) ProcessPendingBatches() {
|
||||
// get pending batches from database in ascending order by their index.
|
||||
@@ -930,6 +1040,22 @@ func (r *Layer2Relayer) handleConfirmation(cfm *sender.Confirmation) {
|
||||
if err != nil {
|
||||
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "confirmation", cfm, "err", err)
|
||||
}
|
||||
case types.SenderTypeL2GasOracle:
|
||||
batchHash := cfm.ContextID
|
||||
var status types.GasOracleStatus
|
||||
if cfm.IsSuccessful {
|
||||
status = types.GasOracleImported
|
||||
r.metrics.rollupL2UpdateGasOracleConfirmedTotal.Inc()
|
||||
} else {
|
||||
status = types.GasOracleImportedFailed
|
||||
r.metrics.rollupL2UpdateGasOracleConfirmedFailedTotal.Inc()
|
||||
log.Warn("UpdateGasOracleTxType transaction confirmed but failed in layer1", "confirmation", cfm)
|
||||
}
|
||||
|
||||
err := r.batchOrm.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, batchHash, status, cfm.TxHash.String())
|
||||
if err != nil {
|
||||
log.Warn("UpdateL2GasOracleStatusAndOracleTxHash failed", "confirmation", cfm, "err", err)
|
||||
}
|
||||
default:
|
||||
log.Warn("Unknown transaction type", "confirmation", cfm)
|
||||
}
|
||||
@@ -937,6 +1063,17 @@ func (r *Layer2Relayer) handleConfirmation(cfm *sender.Confirmation) {
|
||||
log.Info("Transaction confirmed in layer1", "confirmation", cfm)
|
||||
}
|
||||
|
||||
func (r *Layer2Relayer) handleL2GasOracleConfirmLoop(ctx context.Context) {
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case cfm := <-r.gasOracleSender.ConfirmChan():
|
||||
r.handleConfirmation(cfm)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Layer2Relayer) handleL2RollupRelayerConfirmLoop(ctx context.Context) {
|
||||
for {
|
||||
select {
|
||||
@@ -1111,6 +1248,10 @@ func (r *Layer2Relayer) constructFinalizeBundlePayloadCodecV7(dbBatch *orm.Batch
|
||||
// StopSenders stops the senders of the rollup-relayer to prevent querying the removed pending_transaction table in unit tests.
|
||||
// for unit test
|
||||
func (r *Layer2Relayer) StopSenders() {
|
||||
if r.gasOracleSender != nil {
|
||||
r.gasOracleSender.Stop()
|
||||
}
|
||||
|
||||
if r.commitSender != nil {
|
||||
r.commitSender.Stop()
|
||||
}
|
||||
|
||||
@@ -12,10 +12,14 @@ type l2RelayerMetrics struct {
|
||||
rollupL2RelayerProcessPendingBatchTotal prometheus.Counter
|
||||
rollupL2RelayerProcessPendingBatchSuccessTotal prometheus.Counter
|
||||
rollupL2RelayerProcessPendingBatchErrTooManyPendingBlobTxsTotal prometheus.Counter
|
||||
rollupL2RelayerGasPriceOraclerRunTotal prometheus.Counter
|
||||
rollupL2RelayerLastGasPrice prometheus.Gauge
|
||||
rollupL2BatchesCommittedConfirmedTotal prometheus.Counter
|
||||
rollupL2BatchesCommittedConfirmedFailedTotal prometheus.Counter
|
||||
rollupL2BatchesFinalizedConfirmedTotal prometheus.Counter
|
||||
rollupL2BatchesFinalizedConfirmedFailedTotal prometheus.Counter
|
||||
rollupL2UpdateGasOracleConfirmedTotal prometheus.Counter
|
||||
rollupL2UpdateGasOracleConfirmedFailedTotal prometheus.Counter
|
||||
rollupL2ChainMonitorLatestFailedCall prometheus.Counter
|
||||
rollupL2ChainMonitorLatestFailedBatchStatus prometheus.Counter
|
||||
rollupL2RelayerProcessPendingBundlesTotal prometheus.Counter
|
||||
@@ -52,6 +56,14 @@ func initL2RelayerMetrics(reg prometheus.Registerer) *l2RelayerMetrics {
|
||||
Name: "rollup_layer2_process_pending_batch_err_too_many_pending_blob_txs_total",
|
||||
Help: "The total number of layer2 process pending batch failed on too many pending blob txs",
|
||||
}),
|
||||
rollupL2RelayerGasPriceOraclerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "rollup_layer2_gas_price_oracler_total",
|
||||
Help: "The total number of layer2 gas price oracler run total",
|
||||
}),
|
||||
rollupL2RelayerLastGasPrice: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "rollup_layer2_gas_price_latest_gas_price",
|
||||
Help: "The latest gas price of rollup relayer l2",
|
||||
}),
|
||||
rollupL2BatchesCommittedConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "rollup_layer2_process_committed_batches_confirmed_total",
|
||||
Help: "The total number of layer2 process committed batches confirmed total",
|
||||
@@ -68,6 +80,14 @@ func initL2RelayerMetrics(reg prometheus.Registerer) *l2RelayerMetrics {
|
||||
Name: "rollup_layer2_process_finalized_batches_confirmed_failed_total",
|
||||
Help: "The total number of layer2 process finalized batches confirmed failed total",
|
||||
}),
|
||||
rollupL2UpdateGasOracleConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "rollup_layer2_update_layer1_gas_oracle_confirmed_total",
|
||||
Help: "The total number of updating layer2 gas oracle confirmed",
|
||||
}),
|
||||
rollupL2UpdateGasOracleConfirmedFailedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "rollup_layer2_update_layer1_gas_oracle_confirmed_failed_total",
|
||||
Help: "The total number of updating layer2 gas oracle confirmed failed",
|
||||
}),
|
||||
rollupL2ChainMonitorLatestFailedCall: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "rollup_layer2_chain_monitor_latest_failed_batch_call",
|
||||
Help: "The total number of failed call chain_monitor api",
|
||||
|
||||
@@ -2,6 +2,7 @@ package relayer
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"math/big"
|
||||
"net/http"
|
||||
"strings"
|
||||
@@ -13,7 +14,9 @@ import (
|
||||
"github.com/scroll-tech/da-codec/encoding"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/crypto/kzg4844"
|
||||
"github.com/scroll-tech/go-ethereum/params"
|
||||
"github.com/smartystreets/goconvey/convey"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"gorm.io/gorm"
|
||||
|
||||
@@ -41,7 +44,7 @@ func setupL2RelayerDB(t *testing.T) *gorm.DB {
|
||||
func testCreateNewRelayer(t *testing.T) {
|
||||
db := setupL2RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, ¶ms.ChainConfig{}, ServiceTypeL2RollupRelayer, nil)
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, ¶ms.ChainConfig{}, true, ServiceTypeL2RollupRelayer, nil)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, relayer)
|
||||
defer relayer.StopSenders()
|
||||
@@ -61,7 +64,7 @@ func testL2RelayerProcessPendingBatches(t *testing.T) {
|
||||
assert.Fail(t, "unsupported codec version, expected CodecV4")
|
||||
}
|
||||
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, ServiceTypeL2RollupRelayer, nil)
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, true, ServiceTypeL2RollupRelayer, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
patchGuard := gomonkey.ApplyMethodFunc(l2Cli, "SendTransaction", func(_ context.Context, _ *gethTypes.Transaction) error {
|
||||
@@ -110,7 +113,7 @@ func testL2RelayerProcessPendingBundles(t *testing.T) {
|
||||
if codecVersion == encoding.CodecV4 {
|
||||
chainConfig = ¶ms.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}
|
||||
}
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, ServiceTypeL2RollupRelayer, nil)
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, true, ServiceTypeL2RollupRelayer, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
batch := &encoding.Batch{
|
||||
@@ -178,7 +181,7 @@ func testL2RelayerFinalizeTimeoutBundles(t *testing.T) {
|
||||
if codecVersion == encoding.CodecV4 {
|
||||
chainConfig = ¶ms.ChainConfig{LondonBlock: big.NewInt(0), BernoulliBlock: big.NewInt(0), CurieBlock: big.NewInt(0), DarwinTime: new(uint64), DarwinV2Time: new(uint64)}
|
||||
}
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, ServiceTypeL2RollupRelayer, nil)
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, chainConfig, true, ServiceTypeL2RollupRelayer, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
l2BlockOrm := orm.NewL2Block(db)
|
||||
@@ -254,7 +257,7 @@ func testL2RelayerCommitConfirm(t *testing.T) {
|
||||
l2Cfg := cfg.L2Config
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, ¶ms.ChainConfig{}, ServiceTypeL2RollupRelayer, nil)
|
||||
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, ¶ms.ChainConfig{}, true, ServiceTypeL2RollupRelayer, nil)
|
||||
assert.NoError(t, err)
|
||||
defer l2Relayer.StopSenders()
|
||||
|
||||
@@ -310,7 +313,7 @@ func testL2RelayerFinalizeBundleConfirm(t *testing.T) {
|
||||
l2Cfg := cfg.L2Config
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, ¶ms.ChainConfig{}, ServiceTypeL2RollupRelayer, nil)
|
||||
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, ¶ms.ChainConfig{}, true, ServiceTypeL2RollupRelayer, nil)
|
||||
assert.NoError(t, err)
|
||||
defer l2Relayer.StopSenders()
|
||||
|
||||
@@ -371,6 +374,149 @@ func testL2RelayerFinalizeBundleConfirm(t *testing.T) {
|
||||
}, 5*time.Second, 100*time.Millisecond, "Bundle or Batch status did not update as expected")
|
||||
}
|
||||
|
||||
func testL2RelayerGasOracleConfirm(t *testing.T) {
|
||||
db := setupL2RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
batch1 := &encoding.Batch{
|
||||
Index: 0,
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: common.Hash{},
|
||||
Chunks: []*encoding.Chunk{chunk1},
|
||||
}
|
||||
|
||||
batchOrm := orm.NewBatch(db)
|
||||
dbBatch1, err := batchOrm.InsertBatch(context.Background(), batch1, encoding.CodecV0, rutils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
batch2 := &encoding.Batch{
|
||||
Index: batch1.Index + 1,
|
||||
TotalL1MessagePoppedBefore: batch1.TotalL1MessagePoppedBefore,
|
||||
ParentBatchHash: common.HexToHash(dbBatch1.Hash),
|
||||
Chunks: []*encoding.Chunk{chunk2},
|
||||
}
|
||||
|
||||
dbBatch2, err := batchOrm.InsertBatch(context.Background(), batch2, encoding.CodecV0, rutils.BatchMetrics{})
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Create and set up the Layer2 Relayer.
|
||||
l2Cfg := cfg.L2Config
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, ¶ms.ChainConfig{}, false, ServiceTypeL2GasOracle, nil)
|
||||
assert.NoError(t, err)
|
||||
defer l2Relayer.StopSenders()
|
||||
|
||||
// Simulate message confirmations.
|
||||
type BatchConfirmation struct {
|
||||
batchHash string
|
||||
isSuccessful bool
|
||||
}
|
||||
|
||||
confirmations := []BatchConfirmation{
|
||||
{batchHash: dbBatch1.Hash, isSuccessful: true},
|
||||
{batchHash: dbBatch2.Hash, isSuccessful: false},
|
||||
}
|
||||
|
||||
for _, confirmation := range confirmations {
|
||||
l2Relayer.gasOracleSender.SendConfirmation(&sender.Confirmation{
|
||||
ContextID: confirmation.batchHash,
|
||||
IsSuccessful: confirmation.isSuccessful,
|
||||
SenderType: types.SenderTypeL2GasOracle,
|
||||
})
|
||||
}
|
||||
// Check the database for the updated status using TryTimes.
|
||||
ok := utils.TryTimes(5, func() bool {
|
||||
expectedStatuses := []types.GasOracleStatus{types.GasOracleImported, types.GasOracleImportedFailed}
|
||||
for i, confirmation := range confirmations {
|
||||
gasOracle, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{"hash": confirmation.batchHash}, nil, 0)
|
||||
if err != nil || len(gasOracle) != 1 || types.GasOracleStatus(gasOracle[0].OracleStatus) != expectedStatuses[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
assert.True(t, ok)
|
||||
}
|
||||
|
||||
func testLayer2RelayerProcessGasPriceOracle(t *testing.T) {
|
||||
db := setupL2RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, ¶ms.ChainConfig{}, false, ServiceTypeL2GasOracle, nil)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, relayer)
|
||||
defer relayer.StopSenders()
|
||||
|
||||
var batchOrm *orm.Batch
|
||||
convey.Convey("Failed to GetLatestBatch", t, func() {
|
||||
targetErr := errors.New("GetLatestBatch error")
|
||||
patchGuard := gomonkey.ApplyMethodFunc(batchOrm, "GetLatestBatch", func(context.Context) (*orm.Batch, error) {
|
||||
return nil, targetErr
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
relayer.ProcessGasPriceOracle()
|
||||
})
|
||||
|
||||
patchGuard := gomonkey.ApplyMethodFunc(batchOrm, "GetLatestBatch", func(context.Context) (*orm.Batch, error) {
|
||||
batch := orm.Batch{
|
||||
OracleStatus: int16(types.GasOraclePending),
|
||||
Hash: "0x0000000000000000000000000000000000000000",
|
||||
}
|
||||
return &batch, nil
|
||||
})
|
||||
defer patchGuard.Reset()
|
||||
|
||||
convey.Convey("Failed to fetch SuggestGasPrice from l2geth", t, func() {
|
||||
targetErr := errors.New("SuggestGasPrice error")
|
||||
patchGuard.ApplyMethodFunc(relayer.l2Client, "SuggestGasPrice", func(ctx context.Context) (*big.Int, error) {
|
||||
return nil, targetErr
|
||||
})
|
||||
relayer.ProcessGasPriceOracle()
|
||||
})
|
||||
|
||||
patchGuard.ApplyMethodFunc(relayer.l2Client, "SuggestGasPrice", func(ctx context.Context) (*big.Int, error) {
|
||||
return big.NewInt(100), nil
|
||||
})
|
||||
|
||||
convey.Convey("Failed to pack setL2BaseFee", t, func() {
|
||||
targetErr := errors.New("setL2BaseFee error")
|
||||
patchGuard.ApplyMethodFunc(relayer.l2GasOracleABI, "Pack", func(name string, args ...interface{}) ([]byte, error) {
|
||||
return nil, targetErr
|
||||
})
|
||||
relayer.ProcessGasPriceOracle()
|
||||
})
|
||||
|
||||
patchGuard.ApplyMethodFunc(relayer.l2GasOracleABI, "Pack", func(name string, args ...interface{}) ([]byte, error) {
|
||||
return nil, nil
|
||||
})
|
||||
|
||||
convey.Convey("Failed to send setL2BaseFee tx to layer2", t, func() {
|
||||
targetErr := errors.New("failed to send setL2BaseFee tx to layer2 error")
|
||||
patchGuard.ApplyMethodFunc(relayer.gasOracleSender, "SendTransaction", func(ContextID string, target *common.Address, data []byte, blob *kzg4844.Blob, fallbackGasLimit uint64) (hash common.Hash, err error) {
|
||||
return common.Hash{}, targetErr
|
||||
})
|
||||
relayer.ProcessGasPriceOracle()
|
||||
})
|
||||
|
||||
patchGuard.ApplyMethodFunc(relayer.gasOracleSender, "SendTransaction", func(ContextID string, target *common.Address, data []byte, blob *kzg4844.Blob, fallbackGasLimit uint64) (hash common.Hash, err error) {
|
||||
return common.HexToHash("0x56789abcdef1234"), nil
|
||||
})
|
||||
|
||||
convey.Convey("UpdateGasOracleStatusAndOracleTxHash failed", t, func() {
|
||||
targetErr := errors.New("UpdateL2GasOracleStatusAndOracleTxHash error")
|
||||
patchGuard.ApplyMethodFunc(batchOrm, "UpdateL2GasOracleStatusAndOracleTxHash", func(ctx context.Context, hash string, status types.GasOracleStatus, txHash string) error {
|
||||
return targetErr
|
||||
})
|
||||
relayer.ProcessGasPriceOracle()
|
||||
})
|
||||
|
||||
patchGuard.ApplyMethodFunc(batchOrm, "UpdateL2GasOracleStatusAndOracleTxHash", func(ctx context.Context, hash string, status types.GasOracleStatus, txHash string) error {
|
||||
return nil
|
||||
})
|
||||
relayer.ProcessGasPriceOracle()
|
||||
}
|
||||
|
||||
func mockChainMonitorServer(baseURL string) (*http.Server, error) {
|
||||
router := gin.New()
|
||||
r := router.Group("/v1")
|
||||
@@ -393,7 +539,7 @@ func testGetBatchStatusByIndex(t *testing.T) {
|
||||
defer database.CloseDB(db)
|
||||
|
||||
cfg.L2Config.RelayerConfig.ChainMonitor.Enabled = true
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, ¶ms.ChainConfig{}, ServiceTypeL2RollupRelayer, nil)
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, ¶ms.ChainConfig{}, true, ServiceTypeL2RollupRelayer, nil)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, relayer)
|
||||
defer relayer.StopSenders()
|
||||
|
||||
@@ -128,6 +128,8 @@ func TestFunctions(t *testing.T) {
|
||||
t.Run("TestL2RelayerFinalizeTimeoutBundles", testL2RelayerFinalizeTimeoutBundles)
|
||||
t.Run("TestL2RelayerCommitConfirm", testL2RelayerCommitConfirm)
|
||||
t.Run("TestL2RelayerFinalizeBundleConfirm", testL2RelayerFinalizeBundleConfirm)
|
||||
t.Run("TestL2RelayerGasOracleConfirm", testL2RelayerGasOracleConfirm)
|
||||
t.Run("TestLayer2RelayerProcessGasPriceOracle", testLayer2RelayerProcessGasPriceOracle)
|
||||
|
||||
// test getBatchStatusByIndex
|
||||
t.Run("TestGetBatchStatusByIndex", testGetBatchStatusByIndex)
|
||||
|
||||
@@ -13,8 +13,6 @@ import (
|
||||
"github.com/scroll-tech/go-ethereum/params"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/types"
|
||||
|
||||
"scroll-tech/rollup/internal/config"
|
||||
"scroll-tech/rollup/internal/orm"
|
||||
"scroll-tech/rollup/internal/utils"
|
||||
@@ -36,7 +34,6 @@ type BatchProposer struct {
|
||||
maxUncompressedBatchBytesSize uint64
|
||||
maxChunksPerBatch int
|
||||
|
||||
replayMode bool
|
||||
minCodecVersion encoding.CodecVersion
|
||||
chainCfg *params.ChainConfig
|
||||
|
||||
@@ -83,7 +80,6 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, minC
|
||||
gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
|
||||
maxUncompressedBatchBytesSize: cfg.MaxUncompressedBatchBytesSize,
|
||||
maxChunksPerBatch: cfg.MaxChunksPerBatch,
|
||||
replayMode: false,
|
||||
minCodecVersion: minCodecVersion,
|
||||
chainCfg: chainCfg,
|
||||
|
||||
@@ -156,14 +152,6 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, minC
|
||||
return p
|
||||
}
|
||||
|
||||
// SetReplayDB sets the replay database for the BatchProposer.
|
||||
// This is used for the proposer tool only, to change the l2_block data source.
|
||||
// This function is not thread-safe and should be called after initializing the BatchProposer and before starting to propose chunks.
|
||||
func (p *BatchProposer) SetReplayDB(replayDB *gorm.DB) {
|
||||
p.l2BlockOrm = orm.NewL2Block(replayDB)
|
||||
p.replayMode = true
|
||||
}
|
||||
|
||||
// TryProposeBatch tries to propose a new batches.
|
||||
func (p *BatchProposer) TryProposeBatch() {
|
||||
p.batchProposerCircleTotal.Inc()
|
||||
@@ -238,15 +226,6 @@ func (p *BatchProposer) updateDBBatchInfo(batch *encoding.Batch, codecVersion en
|
||||
log.Warn("BatchProposer.UpdateBatchHashInRange update the chunk's batch hash failure", "hash", dbBatch.Hash, "error", dbErr)
|
||||
return dbErr
|
||||
}
|
||||
if p.replayMode {
|
||||
// If replayMode is true, meaning the batch was proposed by the proposer tool,
|
||||
// set batch status to types.RollupCommitted and assign a unique commit tx hash to enable new bundle proposals.
|
||||
if dbErr = p.batchOrm.UpdateCommitTxHashAndRollupStatus(p.ctx, dbBatch.Hash, dbBatch.Hash, types.RollupCommitted, dbTX); dbErr != nil {
|
||||
log.Warn("BatchProposer.UpdateCommitTxHashAndRollupStatus update the batch's commit tx hash failure", "hash", dbBatch.Hash, "error", dbErr)
|
||||
return dbErr
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
})
|
||||
if err != nil {
|
||||
|
||||
@@ -199,7 +199,7 @@ func (p *BundleProposer) proposeBundle() error {
|
||||
|
||||
currentTimeSec := uint64(time.Now().Unix())
|
||||
if firstChunk.StartBlockTime+p.bundleTimeoutSec < currentTimeSec {
|
||||
log.Info("first block timeout", "batch count", len(batches), "start block number", firstChunk.StartBlockNumber, "start block timestamp", firstChunk.StartBlockTime, "bundle timeout", p.bundleTimeoutSec, "current time", currentTimeSec)
|
||||
log.Info("first block timeout", "batch count", len(batches), "start block number", firstChunk.StartBlockNumber, "start block timestamp", firstChunk.StartBlockTime, "current time", currentTimeSec)
|
||||
|
||||
batches, err = p.allBatchesCommittedInSameTXIncluded(batches)
|
||||
if err != nil {
|
||||
|
||||
@@ -36,7 +36,6 @@ type ChunkProposer struct {
|
||||
gasCostIncreaseMultiplier float64
|
||||
maxUncompressedBatchBytesSize uint64
|
||||
|
||||
replayMode bool
|
||||
minCodecVersion encoding.CodecVersion
|
||||
chainCfg *params.ChainConfig
|
||||
|
||||
@@ -92,7 +91,6 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, minC
|
||||
chunkTimeoutSec: cfg.ChunkTimeoutSec,
|
||||
gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
|
||||
maxUncompressedBatchBytesSize: cfg.MaxUncompressedBatchBytesSize,
|
||||
replayMode: false,
|
||||
minCodecVersion: minCodecVersion,
|
||||
chainCfg: chainCfg,
|
||||
|
||||
@@ -177,14 +175,6 @@ func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, minC
|
||||
return p
|
||||
}
|
||||
|
||||
// SetReplayDB sets the replay database for the ChunkProposer.
|
||||
// This is used for the proposer tool only, to change the l2_block data source.
|
||||
// This function is not thread-safe and should be called after initializing the ChunkProposer and before starting to propose chunks.
|
||||
func (p *ChunkProposer) SetReplayDB(replayDB *gorm.DB) {
|
||||
p.l2BlockOrm = orm.NewL2Block(replayDB)
|
||||
p.replayMode = true
|
||||
}
|
||||
|
||||
// TryProposeChunk tries to propose a new chunk.
|
||||
func (p *ChunkProposer) TryProposeChunk() {
|
||||
p.chunkProposerCircleTotal.Inc()
|
||||
@@ -251,12 +241,9 @@ func (p *ChunkProposer) updateDBChunkInfo(chunk *encoding.Chunk, codecVersion en
|
||||
log.Warn("ChunkProposer.InsertChunk failed", "codec version", codecVersion, "err", err)
|
||||
return err
|
||||
}
|
||||
// In replayMode we don't need to update chunk_hash in l2_block table.
|
||||
if !p.replayMode {
|
||||
if err := p.l2BlockOrm.UpdateChunkHashInRange(p.ctx, dbChunk.StartBlockNumber, dbChunk.EndBlockNumber, dbChunk.Hash, dbTX); err != nil {
|
||||
log.Error("failed to update chunk_hash for l2_block", "chunk hash", dbChunk.Hash, "start block", dbChunk.StartBlockNumber, "end block", dbChunk.EndBlockNumber, "err", err)
|
||||
return err
|
||||
}
|
||||
if err := p.l2BlockOrm.UpdateChunkHashInRange(p.ctx, dbChunk.StartBlockNumber, dbChunk.EndBlockNumber, dbChunk.Hash, dbTX); err != nil {
|
||||
log.Error("failed to update chunk_hash for l2_blocks", "chunk hash", dbChunk.Hash, "start block", dbChunk.StartBlockNumber, "end block", dbChunk.EndBlockNumber, "err", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
@@ -449,12 +436,6 @@ func (p *ChunkProposer) recordTimerChunkMetrics(metrics *utils.ChunkMetrics) {
|
||||
}
|
||||
|
||||
func (p *ChunkProposer) tryProposeEuclidTransitionChunk(blocks []*encoding.Block) (bool, error) {
|
||||
// If we are in replay mode, there is a corner case when StartL2Block is set as 0 in this check,
|
||||
// it needs to get genesis block, but in mainnet db there is no genesis block, so we need to bypass this check.
|
||||
if p.replayMode {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
if !p.chainCfg.IsEuclid(blocks[0].Header.Time) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
@@ -1,160 +0,0 @@
|
||||
package watcher
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"time"
|
||||
|
||||
"github.com/scroll-tech/da-codec/encoding"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/scroll-tech/go-ethereum/params"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/database"
|
||||
"scroll-tech/common/utils"
|
||||
"scroll-tech/database/migrate"
|
||||
|
||||
"scroll-tech/rollup/internal/config"
|
||||
"scroll-tech/rollup/internal/orm"
|
||||
rutils "scroll-tech/rollup/internal/utils"
|
||||
)
|
||||
|
||||
// ProposerTool is a tool for proposing chunks and bundles to the L1 chain.
|
||||
type ProposerTool struct {
|
||||
ctx context.Context
|
||||
cancel context.CancelFunc
|
||||
|
||||
db *gorm.DB
|
||||
dbForReplay *gorm.DB
|
||||
client *ethclient.Client
|
||||
|
||||
chunkProposer *ChunkProposer
|
||||
batchProposer *BatchProposer
|
||||
bundleProposer *BundleProposer
|
||||
}
|
||||
|
||||
// NewProposerTool creates a new ProposerTool instance.
|
||||
func NewProposerTool(ctx context.Context, cancel context.CancelFunc, cfg *config.ConfigForReplay, startL2BlockHeight uint64, minCodecVersion encoding.CodecVersion, chainCfg *params.ChainConfig) (*ProposerTool, error) {
|
||||
// Init db connection
|
||||
db, err := database.InitDB(cfg.DBConfig)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to init db connection: %w", err)
|
||||
}
|
||||
sqlDB, err := db.DB()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get db connection: %w", err)
|
||||
}
|
||||
if err = migrate.ResetDB(sqlDB); err != nil {
|
||||
return nil, fmt.Errorf("failed to reset db: %w", err)
|
||||
}
|
||||
log.Info("successfully reset db")
|
||||
|
||||
// Init dbForReplay connection
|
||||
dbForReplay, err := database.InitDB(cfg.DBConfigForReplay)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to init dbForReplay connection: %w", err)
|
||||
}
|
||||
|
||||
client, err := ethclient.Dial(cfg.L2Config.Endpoint)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to connect to L2 geth, endpoint: %s, err: %w", cfg.L2Config.Endpoint, err)
|
||||
}
|
||||
|
||||
prevChunk, err := orm.NewChunk(dbForReplay).GetParentChunkByBlockNumber(ctx, startL2BlockHeight)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get previous chunk: %w", err)
|
||||
}
|
||||
|
||||
var startQueueIndex uint64
|
||||
if prevChunk != nil {
|
||||
startQueueIndex = prevChunk.TotalL1MessagesPoppedBefore + prevChunk.TotalL1MessagesPoppedInChunk
|
||||
}
|
||||
|
||||
startBlock := uint64(0)
|
||||
if prevChunk != nil {
|
||||
startBlock = prevChunk.EndBlockNumber + 1
|
||||
}
|
||||
|
||||
var chunk *encoding.Chunk
|
||||
for blockNum := startBlock; blockNum <= startL2BlockHeight; blockNum++ {
|
||||
block, err := client.BlockByNumber(ctx, new(big.Int).SetUint64(blockNum))
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get block %d: %w", blockNum, err)
|
||||
}
|
||||
|
||||
for _, tx := range block.Transactions() {
|
||||
if tx.Type() == gethTypes.L1MessageTxType {
|
||||
startQueueIndex++
|
||||
}
|
||||
}
|
||||
|
||||
if blockNum == startL2BlockHeight {
|
||||
chunk = &encoding.Chunk{Blocks: []*encoding.Block{{Header: block.Header()}}}
|
||||
}
|
||||
}
|
||||
|
||||
// Setting empty hash as the post_l1_message_queue_hash of the first chunk,
|
||||
// i.e., treating the first L1 message after this chunk as the first L1 message in message queue v2.
|
||||
// Though this setting is different from mainnet, it's simple yet sufficient for data analysis usage.
|
||||
_, err = orm.NewChunk(db).InsertTestChunkForProposerTool(ctx, chunk, minCodecVersion, startQueueIndex)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to insert chunk, minCodecVersion: %d, startQueueIndex: %d, err: %w", minCodecVersion, startQueueIndex, err)
|
||||
}
|
||||
|
||||
batch := &encoding.Batch{
|
||||
Index: 0,
|
||||
TotalL1MessagePoppedBefore: 0,
|
||||
ParentBatchHash: common.Hash{},
|
||||
Chunks: []*encoding.Chunk{chunk},
|
||||
}
|
||||
|
||||
var dbBatch *orm.Batch
|
||||
dbBatch, err = orm.NewBatch(db).InsertBatch(ctx, batch, encoding.CodecV0, rutils.BatchMetrics{})
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to insert batch: %w", err)
|
||||
}
|
||||
|
||||
if err = orm.NewChunk(db).UpdateBatchHashInRange(ctx, 0, 0, dbBatch.Hash); err != nil {
|
||||
return nil, fmt.Errorf("failed to update batch hash for chunks: %w", err)
|
||||
}
|
||||
|
||||
chunkProposer := NewChunkProposer(ctx, cfg.L2Config.ChunkProposerConfig, minCodecVersion, chainCfg, db, nil)
|
||||
chunkProposer.SetReplayDB(dbForReplay)
|
||||
batchProposer := NewBatchProposer(ctx, cfg.L2Config.BatchProposerConfig, minCodecVersion, chainCfg, db, nil)
|
||||
batchProposer.SetReplayDB(dbForReplay)
|
||||
bundleProposer := NewBundleProposer(ctx, cfg.L2Config.BundleProposerConfig, minCodecVersion, chainCfg, db, nil)
|
||||
|
||||
return &ProposerTool{
|
||||
ctx: ctx,
|
||||
cancel: cancel,
|
||||
|
||||
db: db,
|
||||
dbForReplay: dbForReplay,
|
||||
client: client,
|
||||
|
||||
chunkProposer: chunkProposer,
|
||||
batchProposer: batchProposer,
|
||||
bundleProposer: bundleProposer,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (p *ProposerTool) Start() {
|
||||
go utils.Loop(p.ctx, 100*time.Millisecond, p.chunkProposer.TryProposeChunk)
|
||||
go utils.Loop(p.ctx, 100*time.Millisecond, p.batchProposer.TryProposeBatch)
|
||||
go utils.Loop(p.ctx, 100*time.Millisecond, p.bundleProposer.TryProposeBundle)
|
||||
}
|
||||
|
||||
func (p *ProposerTool) Stop() {
|
||||
p.cancel()
|
||||
if err := database.CloseDB(p.db); err != nil {
|
||||
log.Error("failed to close db connection", "error", err)
|
||||
}
|
||||
if err := database.CloseDB(p.dbForReplay); err != nil {
|
||||
log.Error("failed to close dbForReplay connection", "error", err)
|
||||
}
|
||||
p.client.Close()
|
||||
}
|
||||
@@ -56,6 +56,10 @@ type Batch struct {
	FinalizeTxHash string     `json:"finalize_tx_hash" gorm:"column:finalize_tx_hash;default:NULL"`
	FinalizedAt    *time.Time `json:"finalized_at" gorm:"column:finalized_at;default:NULL"`

	// gas oracle
	OracleStatus int16  `json:"oracle_status" gorm:"column:oracle_status;default:1"`
	OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`

	// blob
	BlobDataProof []byte `json:"blob_data_proof" gorm:"column:blob_data_proof"`
	BlobSize      uint64 `json:"blob_size" gorm:"column:blob_size"`
@@ -162,8 +166,7 @@ func (o *Batch) GetFirstUnbatchedChunkIndex(ctx context.Context) (uint64, error)
	return latestBatch.EndChunkIndex + 1, nil
}

// GetCommittedBatchesGEIndexGECodecVersion retrieves batches that have been committed (commit_tx_hash is set) and not finalized (finalize_tx_hash is NULL).
// It returns batches that have an index greater than or equal to the given index and codec version.
// GetCommittedBatchesGEIndexGECodecVersion retrieves batches that have been committed (commit_tx_hash is set) and have a batch index greater than or equal to the given index and codec version.
// The returned batches are sorted in ascending order by their index.
func (o *Batch) GetCommittedBatchesGEIndexGECodecVersion(ctx context.Context, index uint64, codecv encoding.CodecVersion, limit int) ([]*Batch, error) {
	db := o.db.WithContext(ctx)
@@ -171,7 +174,6 @@ func (o *Batch) GetCommittedBatchesGEIndexGECodecVersion(ctx context.Context, in
	db = db.Where("index >= ?", index)
	db = db.Where("codec_version >= ?", codecv)
	db = db.Where("commit_tx_hash IS NOT NULL") // only include committed batches
	db = db.Where("finalize_tx_hash IS NULL")   // exclude finalized batches
	db = db.Order("index ASC")

	if limit > 0 {
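As a usage sketch of the query above (assuming an initialized *gorm.DB; the index, codec version, and limit values are arbitrary examples, not taken from the repository):

	batchOrm := orm.NewBatch(db)
	// Fetch up to 10 committed batches with index >= 100 and codec version >= CodecV4,
	// ordered by index ascending.
	batches, err := batchOrm.GetCommittedBatchesGEIndexGECodecVersion(ctx, 100, encoding.CodecV4, 10)
	if err != nil {
		return fmt.Errorf("failed to get committed batches: %w", err)
	}
	for _, b := range batches {
		log.Info("committed batch", "index", b.Index, "hash", b.Hash, "commitTxHash", b.CommitTxHash)
	}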
@@ -308,6 +310,7 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecVer
		ChunkProofsStatus:         int16(types.ChunkProofsStatusPending),
		ProvingStatus:             int16(types.ProvingTaskUnassigned),
		RollupStatus:              int16(types.RollupPending),
		OracleStatus:              int16(types.GasOraclePending),
		TotalL1CommitGas:          metrics.L1CommitGas,
		TotalL1CommitCalldataSize: metrics.L1CommitCalldataSize,
		BlobDataProof:             batchMeta.BatchBlobDataProof,
@@ -328,6 +331,22 @@ func (o *Batch) InsertBatch(ctx context.Context, batch *encoding.Batch, codecVer
	return &newBatch, nil
}

// UpdateL2GasOracleStatusAndOracleTxHash updates the L2 gas oracle status and transaction hash for a batch.
func (o *Batch) UpdateL2GasOracleStatusAndOracleTxHash(ctx context.Context, hash string, status types.GasOracleStatus, txHash string) error {
	updateFields := make(map[string]interface{})
	updateFields["oracle_status"] = int(status)
	updateFields["oracle_tx_hash"] = txHash

	db := o.db.WithContext(ctx)
	db = db.Model(&Batch{})
	db = db.Where("hash", hash)

	if err := db.Updates(updateFields).Error; err != nil {
		return fmt.Errorf("Batch.UpdateL2GasOracleStatusAndOracleTxHash error: %w, batch hash: %v, status: %v, txHash: %v", err, hash, status.String(), txHash)
	}
	return nil
}

// UpdateProvingStatus updates the proving status of a batch.
func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
	updateFields := make(map[string]interface{})
@@ -386,12 +405,7 @@ func (o *Batch) UpdateRollupStatus(ctx context.Context, hash string, status type
func (o *Batch) UpdateCommitTxHashAndRollupStatus(ctx context.Context, hash string, commitTxHash string, status types.RollupStatus, dbTX ...*gorm.DB) error {
	updateFields := make(map[string]interface{})
	updateFields["commit_tx_hash"] = commitTxHash
	updateFields["rollup_status"] = gorm.Expr(
		`CASE
			WHEN rollup_status NOT IN (?, ?) THEN ?
			ELSE rollup_status
		END`,
		types.RollupFinalizing, types.RollupFinalized, int(status))
	updateFields["rollup_status"] = int(status)
	if status == types.RollupCommitted {
		updateFields["committed_at"] = utils.NowUTC()
	}
@@ -402,6 +416,15 @@ func (o *Batch) UpdateCommitTxHashAndRollupStatus(ctx context.Context, hash stri
	}
	db = db.WithContext(ctx)

	var currentBatch Batch
	if err := db.Where("hash", hash).First(&currentBatch).Error; err != nil {
		return fmt.Errorf("Batch.UpdateCommitTxHashAndRollupStatus error when querying current status: %w, batch hash: %v", err, hash)
	}

	if types.RollupStatus(currentBatch.RollupStatus) == types.RollupFinalizing || types.RollupStatus(currentBatch.RollupStatus) == types.RollupFinalized {
		return nil
	}

	db = db.Model(&Batch{})
	db = db.Where("hash", hash)

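The hunk above shows two alternatives for guarding rollup_status on commit: a single UPDATE carrying a CASE expression via gorm.Expr, and a separate read of the current status followed by an early return. As a standalone sketch of the CASE-based variant for comparison (the helper name below is illustrative, not from the repository):

// updateRollupStatusUnlessFinal sets rollup_status in one UPDATE statement,
// leaving rows that are already finalizing/finalized untouched.
func updateRollupStatusUnlessFinal(db *gorm.DB, hash string, status types.RollupStatus) error {
	return db.Model(&Batch{}).
		Where("hash", hash).
		Update("rollup_status", gorm.Expr(
			`CASE
				WHEN rollup_status NOT IN (?, ?) THEN ?
				ELSE rollup_status
			END`,
			types.RollupFinalizing, types.RollupFinalized, int(status))).Error
}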
@@ -179,25 +179,6 @@ func (o *Chunk) GetChunksByBatchHash(ctx context.Context, batchHash string) ([]*
	return chunks, nil
}

// GetParentChunkByBlockNumber retrieves the parent chunk by block number.
// It is only used by the proposer tool for analysis purposes.
func (o *Chunk) GetParentChunkByBlockNumber(ctx context.Context, blockNumber uint64) (*Chunk, error) {
	db := o.db.WithContext(ctx)
	db = db.Model(&Chunk{})
	db = db.Where("end_block_number < ?", blockNumber)
	db = db.Order("end_block_number DESC")
	db = db.Limit(1)

	var chunk Chunk
	if err := db.First(&chunk).Error; err != nil {
		if errors.Is(err, gorm.ErrRecordNotFound) {
			return nil, nil
		}
		return nil, fmt.Errorf("Chunk.GetParentChunkByBlockNumber error: %w", err)
	}
	return &chunk, nil
}

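For context, the proposer tool uses this helper to decide where replay should start, as in the constructor shown earlier in this diff. A condensed sketch (the default start block of 1 is an assumption for illustration):

	prevChunk, err := orm.NewChunk(db).GetParentChunkByBlockNumber(ctx, startL2BlockHeight)
	if err != nil {
		return nil, fmt.Errorf("failed to get parent chunk below block %d: %w", startL2BlockHeight, err)
	}

	startBlock := uint64(1) // assumed default when no parent chunk exists
	if prevChunk != nil {
		startBlock = prevChunk.EndBlockNumber + 1
	}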
// InsertChunk inserts a new chunk into the database.
func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, codecVersion encoding.CodecVersion, metrics rutils.ChunkMetrics, dbTX ...*gorm.DB) (*Chunk, error) {
	if chunk == nil || len(chunk.Blocks) == 0 {
@@ -278,51 +259,6 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *encoding.Chunk, codecVer
	return &newChunk, nil
}

// InsertTestChunkForProposerTool inserts a new chunk into the database, only for analysis usage by the proposer tool.
func (o *Chunk) InsertTestChunkForProposerTool(ctx context.Context, chunk *encoding.Chunk, codecVersion encoding.CodecVersion, totalL1MessagePoppedBefore uint64, dbTX ...*gorm.DB) (*Chunk, error) {
	if chunk == nil || len(chunk.Blocks) == 0 {
		return nil, errors.New("invalid args")
	}

	chunkHash, err := rutils.GetChunkHash(chunk, totalL1MessagePoppedBefore, codecVersion)
	if err != nil {
		log.Error("failed to get chunk hash", "err", err)
		return nil, fmt.Errorf("Chunk.InsertTestChunkForProposerTool error: %w", err)
	}

	numBlocks := len(chunk.Blocks)
	firstBlock := chunk.Blocks[0]
	lastBlock := chunk.Blocks[numBlocks-1]
	newChunk := Chunk{
		Index:                       0,
		Hash:                        chunkHash.Hex(),
		StartBlockNumber:            firstBlock.Header.Number.Uint64(),
		StartBlockHash:              firstBlock.Header.Hash().Hex(),
		EndBlockNumber:              lastBlock.Header.Number.Uint64(),
		EndBlockHash:                lastBlock.Header.Hash().Hex(),
		TotalL2TxGas:                chunk.TotalGasUsed(),
		TotalL2TxNum:                chunk.NumL2Transactions(),
		StartBlockTime:              firstBlock.Header.Time,
		TotalL1MessagesPoppedBefore: totalL1MessagePoppedBefore,
		StateRoot:                   lastBlock.Header.Root.Hex(),
		WithdrawRoot:                lastBlock.WithdrawRoot.Hex(),
		CodecVersion:                int16(codecVersion),
	}

	db := o.db
	if len(dbTX) > 0 && dbTX[0] != nil {
		db = dbTX[0]
	}
	db = db.WithContext(ctx)
	db = db.Model(&Chunk{})

	if err := db.Create(&newChunk).Error; err != nil {
		return nil, fmt.Errorf("Chunk.InsertTestChunkForProposerTool error: %w, chunk hash: %v", err, newChunk.Hash)
	}

	return &newChunk, nil
}

// UpdateProvingStatus updates the proving status of a chunk.
func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status types.ProvingStatus, dbTX ...*gorm.DB) error {
	updateFields := make(map[string]interface{})

@@ -300,12 +300,16 @@ func TestBatchOrm(t *testing.T) {
	assert.NoError(t, err)
	err = batchOrm.UpdateRollupStatus(context.Background(), batchHash2, types.RollupFinalized)
	assert.NoError(t, err)
	err = batchOrm.UpdateL2GasOracleStatusAndOracleTxHash(context.Background(), batchHash2, types.GasOracleImported, "oracleTxHash")
	assert.NoError(t, err)

	updatedBatch, err := batchOrm.GetLatestBatch(context.Background())
	assert.NoError(t, err)
	assert.NotNil(t, updatedBatch)
	assert.Equal(t, types.ProvingTaskVerified, types.ProvingStatus(updatedBatch.ProvingStatus))
	assert.Equal(t, types.RollupFinalized, types.RollupStatus(updatedBatch.RollupStatus))
	assert.Equal(t, types.GasOracleImported, types.GasOracleStatus(updatedBatch.OracleStatus))
	assert.Equal(t, "oracleTxHash", updatedBatch.OracleTxHash)

	err = batchOrm.UpdateCommitTxHashAndRollupStatus(context.Background(), batchHash1, "commitTxHash", types.RollupCommitted)
	assert.NoError(t, err)
@@ -314,7 +318,7 @@ func TestBatchOrm(t *testing.T) {
	updatedBatch, err = batchOrm.GetLatestBatch(context.Background())
	assert.NoError(t, err)
	assert.NotNil(t, updatedBatch)
	assert.Equal(t, "commitTxHash", updatedBatch.CommitTxHash)
	assert.Equal(t, "", updatedBatch.CommitTxHash)
	assert.Equal(t, types.RollupFinalized, types.RollupStatus(updatedBatch.RollupStatus))

	err = batchOrm.UpdateFinalizeTxHashAndRollupStatus(context.Background(), batchHash2, "finalizeTxHash", types.RollupFinalizeFailed)
@@ -355,7 +359,10 @@ func TestBatchOrm(t *testing.T) {

	batches, err = batchOrm.GetCommittedBatchesGEIndexGECodecVersion(context.Background(), 0, codecVersion, 0)
	assert.NoError(t, err)
	assert.Equal(t, 0, len(batches))
	assert.Equal(t, 1, len(batches))
	assert.Equal(t, batchHash1, batches[0].Hash)
	assert.Equal(t, types.ProvingTaskFailed, types.ProvingStatus(batches[0].ProvingStatus))
	assert.Equal(t, types.RollupCommitFailed, types.RollupStatus(batches[0].RollupStatus))
	}
}

@@ -1,40 +0,0 @@
{
  "l2_config": {
    "endpoint": "https://rpc.scroll.io",
    "chunk_proposer_config": {
      "max_block_num_per_chunk": 100,
      "max_tx_num_per_chunk": 100,
      "max_l2_gas_per_chunk": 20000000,
      "max_l1_commit_gas_per_chunk": 5000000,
      "max_l1_commit_calldata_size_per_chunk": 123740,
      "chunk_timeout_sec": 72000000000,
      "max_row_consumption_per_chunk": 10000000000,
      "gas_cost_increase_multiplier": 1.2,
      "max_uncompressed_batch_bytes_size": 634693
    },
    "batch_proposer_config": {
      "max_l1_commit_gas_per_batch": 5000000,
      "max_l1_commit_calldata_size_per_batch": 123740,
      "batch_timeout_sec": 72000000000,
      "gas_cost_increase_multiplier": 1.2,
      "max_uncompressed_batch_bytes_size": 634693,
      "max_chunks_per_batch": 45
    },
    "bundle_proposer_config": {
      "max_batch_num_per_bundle": 45,
      "bundle_timeout_sec": 36000000000
    }
  },
  "db_config": {
    "driver_name": "postgres",
    "dsn": "postgres://postgres:postgres@db:5432/scroll?sslmode=disable",
    "maxOpenNum": 200,
    "maxIdleNum": 20
  },
  "db_config_for_replay": {
    "driver_name": "postgres",
    "dsn": "<mainnet read db config>",
    "maxOpenNum": 200,
    "maxIdleNum": 20
  }
}
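A config of this shape can be loaded with encoding/json. The sketch below is illustrative only: the struct covers just a subset of the fields shown above, and its type names are assumptions rather than the tool's actual config types.

package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// Hypothetical subset of the proposer-tool config shown above.
type dbConfig struct {
	DriverName string `json:"driver_name"`
	DSN        string `json:"dsn"`
	MaxOpenNum int    `json:"maxOpenNum"`
	MaxIdleNum int    `json:"maxIdleNum"`
}

type proposerToolConfig struct {
	L2Config struct {
		Endpoint string `json:"endpoint"`
	} `json:"l2_config"`
	DBConfig          dbConfig `json:"db_config"`
	DBConfigForReplay dbConfig `json:"db_config_for_replay"`
}

func loadConfig(path string) (*proposerToolConfig, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, fmt.Errorf("failed to read config %s: %w", path, err)
	}
	var cfg proposerToolConfig
	if err := json.Unmarshal(data, &cfg); err != nil {
		return nil, fmt.Errorf("failed to parse config %s: %w", path, err)
	}
	return &cfg, nil
}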
@@ -1,19 +0,0 @@
{
  "config": {
    "chainId": 534352,
    "bernoulliBlock": 0,
    "curieBlock": 0,
    "darwinTime": 0,
    "darwinV2Time": 0,
    "euclidTime": 0,
    "euclidV2Time": 0
  },
  "nonce": "0x0000000000000033",
  "timestamp": "0x0",
  "parentHash": "0x0000000000000000000000000000000000000000000000000000000000000000",
  "gasLimit": "0x8000000",
  "difficulty": "0x100",
  "mixhash": "0x0000000000000000000000000000000000000000000000000000000000000000",
  "coinbase": "0x3333333333333333333333333333333333333333",
  "alloc": {}
}
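A genesis file of this shape can be decoded with go-ethereum's core.Genesis type; its "config" section is the chain config that the constructor earlier in this diff passes to the chunk/batch/bundle proposers as chainCfg. A minimal sketch (the file path is an assumption):

package main

import (
	"encoding/json"
	"os"

	"github.com/scroll-tech/go-ethereum/core"
	"github.com/scroll-tech/go-ethereum/log"
)

func main() {
	// Hypothetical path; the actual location of this genesis file is not shown here.
	f, err := os.Open("./conf/genesis.json")
	if err != nil {
		log.Crit("failed to open genesis file", "error", err)
	}
	defer f.Close()

	var genesis core.Genesis
	if err := json.NewDecoder(f).Decode(&genesis); err != nil {
		log.Crit("failed to decode genesis file", "error", err)
	}
	// genesis.Config carries the chain ID and hard-fork heights/times.
	log.Info("loaded genesis", "chainID", genesis.Config.ChainID)
}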
@@ -1,30 +0,0 @@
# Download Go dependencies
FROM scrolltech/go-rust-builder:go-1.22-rust-nightly-2023-12-03 as base

WORKDIR /src
COPY go.work* ./
COPY ./rollup/go.* ./rollup/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
RUN go mod download -x

# Build proposer_tool
FROM base as builder

RUN --mount=target=. \
    --mount=type=cache,target=/root/.cache/go-build \
    cd /src/rollup/cmd/proposer_tool/ && CGO_LDFLAGS="-ldl" go build -v -p 4 -o /bin/proposer_tool

# Pull proposer_tool into a second stage deploy ubuntu container
FROM ubuntu:20.04

RUN apt update && apt install vim netcat-openbsd net-tools curl ca-certificates -y

ENV CGO_LDFLAGS="-ldl"

COPY --from=builder /bin/proposer_tool /bin/
WORKDIR /app
ENTRYPOINT ["proposer_tool"]
@@ -211,7 +211,8 @@ func TestFunction(t *testing.T) {
	t.Run("testCommitBatchAndFinalizeBundleCodecV4V5V6", testCommitBatchAndFinalizeBundleCodecV4V5V6)
	t.Run("TestCommitBatchAndFinalizeBundleCodecV7", testCommitBatchAndFinalizeBundleCodecV7)

	// l1 gas oracle
	// l1/l2 gas oracle
	t.Run("TestImportL1GasPrice", testImportL1GasPrice)
	t.Run("TestImportDefaultL1GasPriceDueToL1GasPriceSpike", testImportDefaultL1GasPriceDueToL1GasPriceSpike)
	t.Run("TestImportL2GasPrice", testImportL2GasPrice)
}

@@ -9,6 +9,7 @@ import (
	"github.com/scroll-tech/da-codec/encoding"
	"github.com/scroll-tech/go-ethereum/common"
	gethTypes "github.com/scroll-tech/go-ethereum/core/types"
	"github.com/scroll-tech/go-ethereum/params"
	"github.com/stretchr/testify/assert"

	"scroll-tech/common/database"
@@ -200,3 +201,56 @@ func testImportDefaultL1GasPriceDueToL1GasPriceSpike(t *testing.T) {
	assert.Empty(t, blocks[0].OracleTxHash)
	assert.Equal(t, types.GasOracleStatus(blocks[0].GasOracleStatus), types.GasOraclePending)
}

func testImportL2GasPrice(t *testing.T) {
	db := setupDB(t)
	defer database.CloseDB(db)
	prepareContracts(t)

	l2Cfg := rollupApp.Config.L2Config
	l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, &params.ChainConfig{}, false, relayer.ServiceTypeL2GasOracle, nil)
	assert.NoError(t, err)
	defer l2Relayer.StopSenders()

	// add fake chunk
	chunk := &encoding.Chunk{
		Blocks: []*encoding.Block{
			{
				Header: &gethTypes.Header{
					Number:     big.NewInt(1),
					ParentHash: common.Hash{},
					Difficulty: big.NewInt(0),
					BaseFee:    big.NewInt(0),
				},
				Transactions:   nil,
				WithdrawRoot:   common.Hash{},
				RowConsumption: &gethTypes.RowConsumption{},
			},
		},
	}
	batch := &encoding.Batch{
		Index:                      0,
		TotalL1MessagePoppedBefore: 0,
		ParentBatchHash:            common.Hash{},
		Chunks:                     []*encoding.Chunk{chunk},
	}

	batchOrm := orm.NewBatch(db)
	_, err = batchOrm.InsertBatch(context.Background(), batch, encoding.CodecV0, utils.BatchMetrics{})
	assert.NoError(t, err)

	// check db status
	dbBatch, err := batchOrm.GetLatestBatch(context.Background())
	assert.NoError(t, err)
	assert.NotNil(t, batch)
	assert.Empty(t, dbBatch.OracleTxHash)
	assert.Equal(t, types.GasOracleStatus(dbBatch.OracleStatus), types.GasOraclePending)

	// relay gas price
	l2Relayer.ProcessGasPriceOracle()
	dbBatch, err = batchOrm.GetLatestBatch(context.Background())
	assert.NoError(t, err)
	assert.NotNil(t, batch)
	assert.NotEmpty(t, dbBatch.OracleTxHash)
	assert.Equal(t, types.GasOracleStatus(dbBatch.OracleStatus), types.GasOracleImporting)
}

@@ -33,7 +33,7 @@ func testCommitAndFinalizeGenesisBatch(t *testing.T) {
	prepareContracts(t)

	l2Cfg := rollupApp.Config.L2Config
	l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, &params.ChainConfig{}, relayer.ServiceTypeL2RollupRelayer, nil)
	l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, &params.ChainConfig{}, true, relayer.ServiceTypeL2RollupRelayer, nil)
	assert.NoError(t, err)
	assert.NotNil(t, l2Relayer)
	defer l2Relayer.StopSenders()
@@ -65,7 +65,7 @@ func testCommitBatchAndFinalizeBundleCodecV4V5V6(t *testing.T) {

	// Create L2Relayer
	l2Cfg := rollupApp.Config.L2Config
	l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, chainConfig, relayer.ServiceTypeL2RollupRelayer, nil)
	l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, chainConfig, true, relayer.ServiceTypeL2RollupRelayer, nil)
	assert.NoError(t, err)

	// add some blocks to db
@@ -236,7 +236,7 @@ func testCommitBatchAndFinalizeBundleCodecV7(t *testing.T) {

	// Create L2Relayer
	l2Cfg := rollupApp.Config.L2Config
	l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, chainConfig, relayer.ServiceTypeL2RollupRelayer, nil)
	l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, chainConfig, true, relayer.ServiceTypeL2RollupRelayer, nil)
	require.NoError(t, err)

	defer l2Relayer.StopSenders()

zkvm-prover/Cargo.lock (generated, 632 lines changed): file diff suppressed because it is too large.
@@ -18,7 +18,7 @@ serde = { version = "1.0.198", features = ["derive"] }
serde_json = "1.0.116"
futures = "0.3.30"

scroll-zkvm-prover-euclid = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.3.0", package = "scroll-zkvm-prover" }
scroll-zkvm-prover-euclid = { git = "https://github.com/scroll-tech/zkvm-prover", tag = "v0.2.0", package = "scroll-zkvm-prover" }
ethers-core = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
ethers-providers = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
scroll-proving-sdk = { git = "https://github.com/scroll-tech/scroll-proving-sdk.git", branch = "main", features = [
@@ -51,28 +51,28 @@ openvm-stark-backend = { git = "ssh://git@github.com/scroll-tech/openvm-stark-gp
openvm-stark-sdk = { git = "ssh://git@github.com/scroll-tech/openvm-stark-gpu.git", branch = "main", features = ["gpu"] }

[patch."https://github.com/Plonky3/Plonky3.git"]
p3-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-field = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-commit = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-matrix = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-field = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-commit = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-matrix = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-baby-bear = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", features = [
    "nightly-features",
], tag = "v0.1.1" }
p3-util = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-challenger = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-dft = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-fri = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-goldilocks = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-keccak = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-keccak-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-blake3 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-mds = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-merkle-tree = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-monty-31 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-poseidon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-poseidon2 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-poseidon2-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-symmetric = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-uni-stark = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
p3-maybe-rayon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" } # the "parallel" feature is NOT on by default to allow single-threaded benchmarking
p3-bn254-fr = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.1" }
], tag = "v0.1.0" }
p3-util = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-challenger = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-dft = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-fri = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-goldilocks = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-keccak = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-keccak-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-blake3 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-mds = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-merkle-tree = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-monty-31 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-poseidon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-poseidon2 = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-poseidon2-air = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-symmetric = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-uni-stark = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }
p3-maybe-rayon = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" } # the "parallel" feature is NOT on by default to allow single-threaded benchmarking
p3-bn254-fr = { git = "ssh://git@github.com/scroll-tech/plonky3-gpu.git", tag = "v0.1.0" }