Mirror of https://github.com/scroll-tech/scroll.git (synced 2026-01-11 23:18:07 -05:00)

Compare commits: multi_veri...v4.3.9 (144 commits)
| SHA1 |
|---|
| 5eed174b9e |
| a79992e772 |
| 2a54c8aae6 |
| 3bd0f2fa38 |
| ae95e61902 |
| 8bae647921 |
| 1f4df55d12 |
| f21d4adbfd |
| 145edfccb9 |
| 6f26114f23 |
| 56150da353 |
| fc53572334 |
| 0730e91292 |
| 4d3ff66446 |
| ae1cb30ed1 |
| ad14836796 |
| 9a5517472f |
| 1c7490a88e |
| 7559dc42b4 |
| 49b72bd4e4 |
| 8c41b0b86b |
| cd456ee3db |
| 2b6a3c9874 |
| d39db01c5b |
| 90e3dddf91 |
| c6cc51bcfd |
| 0b0b84a513 |
| 83c8071123 |
| e35de74495 |
| 33089b829f |
| 8e27052b36 |
| 25b956f9b5 |
| f4663fd249 |
| c71fa5a5fc |
| 4af3834e36 |
| 9983585bdd |
| d288b34536 |
| a2fe246551 |
| 8699a22fa3 |
| d668180e9a |
| d3c2e34650 |
| 38551c4eeb |
| 82330621ce |
| 237d048ce6 |
| 38b440b1fa |
| 0ed3d68fc3 |
| 2847d02498 |
| a7d9bf3488 |
| 255eb5f307 |
| 0955382aec |
| 07961f751e |
| bbd5a5a9c2 |
| 2e1f42fcb6 |
| bcdbe1f119 |
| 6034c43bb1 |
| f9da81d587 |
| 38f64e70b7 |
| 44b924170a |
| 227f09a2cf |
| 112e82a4ef |
| 82e6d28e82 |
| 8daa5d5496 |
| 3958e8bd86 |
| f553a70d20 |
| 2dc5ceb44c |
| ff03924d76 |
| f6894bb82f |
| e990e02391 |
| f8d48a6326 |
| dba097e03d |
| 30ad0bfe78 |
| 1dfca3b7c0 |
| 826e847b5a |
| 8c71a6d22a |
| 87f18efba8 |
| fecd129a39 |
| 663156984f |
| 3499c595e7 |
| 95d2df46e3 |
| 102d29c54d |
| 7d50699344 |
| e08b800d1d |
| 6139ca0df0 |
| 24a0fd08ac |
| 2840485f38 |
| dab21fc712 |
| c44b7f7bf4 |
| a8c71b5e36 |
| ae2f62df00 |
| ce5c6e0aa3 |
| e8ddf99184 |
| ebf2b429a3 |
| db46ce408d |
| 3d1a8374d0 |
| 574aa68491 |
| 589388b288 |
| af8175800f |
| e6793a85f5 |
| 7e9f3b7376 |
| 8b611b443a |
| 1f62596b0a |
| a2b9878733 |
| e166dfba48 |
| d7d62eb8e6 |
| 61eb75fbb6 |
| e9af5912ac |
| c3b2f13fe0 |
| 0d847e0427 |
| 7de221ba89 |
| 16fd840896 |
| 20fb098a01 |
| b734282a44 |
| 30bbfc0229 |
| 60bea74444 |
| ae2e010324 |
| 767a2cbfdf |
| 74e5de156e |
| becfd41b0e |
| 8542a176ed |
| ea40517527 |
| 1f151f6ae5 |
| ca76835712 |
| 247dd24a8f |
| b006eaa33e |
| ee4d7c3549 |
| 3c3f56b9b0 |
| 95121093c8 |
| b85a109fd3 |
| c091081f70 |
| e42397867b |
| 9a5ad83121 |
| ba90865ec6 |
| 66f3b42d24 |
| a9a6b7464a |
| 24898602de |
| 623213a67a |
| 8e8a9c0351 |
| 974930f051 |
| 9654d76356 |
| 373e98ff3c |
| 34c3b91fd7 |
| 3adf077070 |
| 2a4d6e05a1 |
| 0ee99024bb |
.github/pull_request_template.md (vendored, 2 changed lines)

@@ -20,7 +20,7 @@ Your PR title must follow [conventional commits](https://www.conventionalcommits

### Deployment tag versioning

Has `tag` in `common/version.go` been updated?
Has `tag` in `common/version.go` been updated or have you added `bump-version` label to this PR?

- [ ] No, this PR doesn't involve a new deployment, git tag, docker image tag
- [ ] Yes
.github/scripts/bump_version_dot_go.mjs (vendored, new file, 37 lines)

@@ -0,0 +1,37 @@
import { URL } from "url";
import { readFileSync, writeFileSync } from "fs";

const versionFilePath = new URL(
  "../../common/version/version.go",
  import.meta.url
).pathname;

const versionFileContent = readFileSync(versionFilePath, { encoding: "utf-8" });

const currentVersion = versionFileContent.match(
  /var tag = "(?<version>v(?<major>\d+)\.(?<minor>\d+)\.(?<patch>\d+))"/
);

try {
  parseInt(currentVersion.groups.major);
  parseInt(currentVersion.groups.minor);
  parseInt(currentVersion.groups.patch);
} catch (err) {
  console.error(new Error("Failed to parse version in version.go file"));
  throw err;
}

// prettier-ignore
const newVersion = `v${currentVersion.groups.major}.${currentVersion.groups.minor}.${parseInt(currentVersion.groups.patch) + 1}`;

console.log(
  `Bump version from ${currentVersion.groups.version} to ${newVersion}`
);

writeFileSync(
  versionFilePath,
  versionFileContent.replace(
    `var tag = "${currentVersion.groups.version}"`,
    `var tag = "${newVersion}"`
  )
);
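For illustration, here is a minimal Go sketch of the same parse-and-bump logic the script above performs, assuming the `var tag = "vX.Y.Z"` format used in `common/version/version.go`; the function name and the sample input are illustrative only, not part of the repository.

```go
package main

import (
	"fmt"
	"regexp"
	"strconv"
)

// bumpPatch takes the contents of version.go and returns it with the patch
// component of `var tag = "vX.Y.Z"` incremented by one, mirroring the mjs script.
func bumpPatch(versionFile string) (string, error) {
	re := regexp.MustCompile(`var tag = "v(\d+)\.(\d+)\.(\d+)"`)
	m := re.FindStringSubmatch(versionFile)
	if m == nil {
		return "", fmt.Errorf("failed to parse version in version.go file")
	}
	patch, err := strconv.Atoi(m[3])
	if err != nil {
		return "", err
	}
	newTag := fmt.Sprintf(`var tag = "v%s.%s.%d"`, m[1], m[2], patch+1)
	return re.ReplaceAllLiteralString(versionFile, newTag), nil
}

func main() {
	// Sample input: bumping v4.3.9 produces v4.3.10.
	out, err := bumpPatch(`var tag = "v4.3.9"`)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // var tag = "v4.3.10"
}
```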
.github/workflows/bump_version.yml (vendored, new file, 63 lines)

@@ -0,0 +1,63 @@
name: Bump version

on:
  pull_request:
    branches: [ develop ]
    types:
      - opened
      - reopened
      - synchronize
      - ready_for_review
      - labeled

jobs:
  try-to-bump:
    if: contains(github.event.pull_request.labels.*.name, 'bump-version')
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v3
        with:
          ref: ${{ github.head_ref }}
      - name: check diff
        id: check_diff
        run: |
          set -euo pipefail

          # fetch develop branch so that we can diff against later
          git fetch origin develop

          echo 'checking verion changes in diff...'

          # check if version changed in version.go
          # note: the grep will fail if use \d instead of [0-9]
          git diff HEAD..origin/develop --text --no-ext-diff --unified=0 --no-prefix common/version/version.go | grep -E '^\+var tag = "v[0-9]+\.[0-9]+\.[0-9]+"$' && true

          exit_code=$?

          # auto bump if version is not bumped manually
          echo '> require auto version bump?'

          if [ $exit_code -eq 0 ]; then
            echo '> no, already bumped'
            echo "result=no-bump" >> "$GITHUB_OUTPUT"
          else
            echo '> yes'
            echo "result=bump" >> "$GITHUB_OUTPUT"
          fi
      - name: Install Node.js 16
        if: steps.check_diff.outputs.result == 'bump'
        uses: actions/setup-node@v3
        with:
          node-version: 16
      - name: bump version in common/version/version.go
        if: steps.check_diff.outputs.result == 'bump'
        run: node .github/scripts/bump_version_dot_go.mjs

      # Commits made by this Action do not trigger new Workflow runs
      - uses: stefanzweifel/git-auto-commit-action@3ea6ae190baf489ba007f7c92608f33ce20ef04a
        if: steps.check_diff.outputs.result == 'bump'
        with:
          skip_fetch: true # already did fetch in check diff
          file_pattern: "common/version/version.go"
          commit_message: "chore: auto version bump [bot]"
.github/workflows/common.yml (vendored, 4 changed lines)

@@ -9,6 +9,7 @@ on:
      - alpha
    paths:
      - 'common/**'
      - '!common/version/version.go'
      - '.github/workflows/common.yml'
  pull_request:
    types:
@@ -18,6 +19,7 @@ on:
      - ready_for_review
    paths:
      - 'common/**'
      - '!common/version/version.go'
      - '.github/workflows/common.yml'

jobs:
@@ -88,8 +90,6 @@ jobs:
          version: '0.8.16'
      - name: Install Geth Tools
        uses: gacts/install-geth-tools@v1
        with:
          version: 1.10.19
      - name: Build prerequisites
        run: |
          make dev_docker
.github/workflows/coordinator.yml (vendored, 4 changed lines)

@@ -10,6 +10,7 @@ on:
    paths:
      - 'coordinator/**'
      - 'common/**'
      - '!common/version/version.go'
      - 'database/**'
      - '.github/workflows/coordinator.yml'
  pull_request:
@@ -21,6 +22,7 @@ on:
    paths:
      - 'coordinator/**'
      - 'common/**'
      - '!common/version/version.go'
      - 'database/**'
      - '.github/workflows/coordinator.yml'

@@ -104,8 +106,6 @@ jobs:
          version: '0.8.16'
      - name: Install Geth Tools
        uses: gacts/install-geth-tools@v1
        with:
          version: 1.10.19
      - name: Build prerequisites
        run: |
          make dev_docker
.github/workflows/database.yml (vendored, 4 changed lines)

@@ -10,6 +10,7 @@ on:
    paths:
      - 'database/**'
      - 'common/**'
      - '!common/version/version.go'
      - '.github/workflows/database.yml'
  pull_request:
    types:
@@ -20,6 +21,7 @@ on:
    paths:
      - 'database/**'
      - 'common/**'
      - '!common/version/version.go'
      - '.github/workflows/database.yml'

jobs:
@@ -81,8 +83,6 @@ jobs:
          version: '0.8.16'
      - name: Install Geth Tools
        uses: gacts/install-geth-tools@v1
        with:
          version: 1.10.19
      - name: Build prerequisites
        run: |
          make dev_docker
@@ -48,27 +48,6 @@
          tags: scrolltech/gas-oracle:${{github.ref_name}}
          # cache-from: type=gha,scope=${{ github.workflow }}
          # cache-to: type=gha,scope=${{ github.workflow }}
  msg_relayer:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      - name: Login to Docker Hub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Build and push msg_relayer docker
        uses: docker/build-push-action@v2
        with:
          context: .
          file: ./build/dockerfiles/msg_relayer.Dockerfile
          push: true
          tags: scrolltech/msg-relayer:${{github.ref_name}}
          # cache-from: type=gha,scope=${{ github.workflow }}
          # cache-to: type=gha,scope=${{ github.workflow }}
  rollup_relayer:
    runs-on: ubuntu-latest
    steps:
@@ -153,24 +132,3 @@ jobs:
          tags: scrolltech/coordinator:${{github.ref_name}}
          # cache-from: type=gha,scope=${{ github.workflow }}
          # cache-to: type=gha,scope=${{ github.workflow }}
  prover-stats-api:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      - name: Login to Docker Hub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Build and push prover-stats-api docker
        uses: docker/build-push-action@v2
        with:
          context: .
          file: ./build/dockerfiles/prover-stats-api.Dockerfile
          push: true
          tags: scrolltech/prover-stats-api:${{github.ref_name}}
          # cache-from: type=gha,scope=${{ github.workflow }}
          # cache-to: type=gha,scope=${{ github.workflow }}
@@ -33,12 +33,10 @@
          version: '0.8.16'
      - name: Install Geth Tools
        uses: gacts/install-geth-tools@v1
        with:
          version: 1.10.19
      - name: Build prerequisites
        run: |
          make dev_docker
          make -C bridge mock_abi
          make -C rollup mock_abi
          make -C common/bytecode all
      - name: Run integration tests
        run: |
.github/workflows/prover_stats_api.yml (vendored, 80 lines)

@@ -1,80 +0,0 @@
name: ProverStatsAPI

on:
  push:
    branches:
      - main
      - staging
      - develop
      - alpha
    paths:
      - 'prover-stats-api/**'
      - '.github/workflows/prover_stats_api.yml'
  pull_request:
    types:
      - opened
      - reopened
      - synchronize
      - ready_for_review
    paths:
      - 'prover-stats-api/**'
      - '.github/workflows/prover_stats_api.yml'

defaults:
  run:
    working-directory: 'prover-stats-api'

jobs:
  check:
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    steps:
      - name: Install Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.19.x
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Lint
        run: |
          rm -rf $HOME/.cache/golangci-lint
          make lint
  test:
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    steps:
      - name: Install Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.19.x
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Test
        run: |
          make test
      - name: Upload coverage reports to Codecov
        uses: codecov/codecov-action@v3
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
        with:
          flags: prover-stats-api
  goimports-lint:
    if: github.event.pull_request.draft == false
    runs-on: ubuntu-latest
    steps:
      - name: Install Go
        uses: actions/setup-go@v2
        with:
          go-version: 1.19.x
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Install goimports
        run: go install golang.org/x/tools/cmd/goimports
      - run: goimports -local scroll-tech/prover-stats-api/ -w .
      - run: go mod tidy
      # If there are any diffs from goimports or go mod tidy, fail.
      - name: Verify no changes from goimports and go mod tidy
        run: |
          if [ -n "$(git status --porcelain)" ]; then
            exit 1
          fi
@@ -1,4 +1,4 @@
name: Bridge
name: Rollup

on:
  push:
@@ -8,10 +8,11 @@ on:
      - develop
      - alpha
    paths:
      - 'bridge/**'
      - 'rollup/**'
      - 'common/**'
      - '!common/version/version.go'
      - 'database/**'
      - '.github/workflows/bridge.yml'
      - '.github/workflows/rollup.yml'
  pull_request:
    types:
      - opened
@@ -19,10 +20,11 @@ on:
      - synchronize
      - ready_for_review
    paths:
      - 'bridge/**'
      - 'rollup/**'
      - 'common/**'
      - '!common/version/version.go'
      - 'database/**'
      - '.github/workflows/bridge.yml'
      - '.github/workflows/rollup.yml'

jobs:
  check:
@@ -43,10 +45,8 @@ jobs:
          version: '0.8.16'
      - name: Install Geth Tools
        uses: gacts/install-geth-tools@v1
        with:
          version: 1.10.19
      - name: Lint
        working-directory: 'bridge'
        working-directory: 'rollup'
        run: |
          rm -rf $HOME/.cache/golangci-lint
          make mock_abi
@@ -64,14 +64,14 @@ jobs:
      - name: Install goimports
        run: go install golang.org/x/tools/cmd/goimports
      - name: Run goimports lint
        run: goimports -local scroll-tech/bridge/ -w .
        working-directory: 'bridge'
        run: goimports -local scroll-tech/rollup/ -w .
        working-directory: 'rollup'
      - name: Run go mod tidy
        run: go mod tidy
        working-directory: 'bridge'
        working-directory: 'rollup'
      # If there are any diffs from goimports or go mod tidy, fail.
      - name: Verify no changes from goimports and go mod tidy
        working-directory: 'bridge'
        working-directory: 'rollup'
        run: |
          if [ -n "$(git status --porcelain)" ]; then
            exit 1
@@ -94,18 +94,16 @@ jobs:
          version: '0.8.16'
      - name: Install Geth Tools
        uses: gacts/install-geth-tools@v1
        with:
          version: 1.10.19
      - name: Build prerequisites
        run: |
          make dev_docker
          make -C bridge mock_abi
      - name: Build bridge binaries
        working-directory: 'bridge'
          make -C rollup mock_abi
      - name: Build rollup binaries
        working-directory: 'rollup'
        run: |
          make bridge_bins
      - name: Test bridge packages
        working-directory: 'bridge'
          make rollup_bins
      - name: Test rollup packages
        working-directory: 'rollup'
        run: |
          go test -v -race -gcflags="-l" -ldflags="-s=false" -coverprofile=coverage.txt -covermode=atomic ./...
      - name: Upload coverage reports to Codecov
@@ -113,7 +111,7 @@ jobs:
        env:
          CODECOV_TOKEN: ${{ secrets.CODECOV_TOKEN }}
        with:
          flags: bridge
          flags: rollup
      # docker-build:
      # if: github.event.pull_request.draft == false
      # runs-on: ubuntu-latest
.gitignore (vendored, 17 changed lines)

@@ -1,9 +1,22 @@
.idea
# Asset files
assets/params*
assets/seed
coverage.txt

# Built binaries
build/bin

coverage.txt
*.integration.txt

# Visual Studio Code
.vscode

# IntelliJ
.idea

# MacOS
.DS_Store

# misc
sftp-config.json
*~
CODE_OF_CONDUCT.md (new file, 128 lines)

@@ -0,0 +1,128 @@
# Contributor Covenant Code of Conduct

## Our Pledge

We as members, contributors, and leaders pledge to make participation in our
community a harassment-free experience for everyone, regardless of age, body
size, visible or invisible disability, ethnicity, sex characteristics, gender
identity and expression, level of experience, education, socio-economic status,
nationality, personal appearance, race, religion, or sexual identity
and orientation.

We pledge to act and interact in ways that contribute to an open, welcoming,
diverse, inclusive, and healthy community.

## Our Standards

Examples of behavior that contributes to a positive environment for our
community include:

* Demonstrating empathy and kindness toward other people
* Being respectful of differing opinions, viewpoints, and experiences
* Giving and gracefully accepting constructive feedback
* Accepting responsibility and apologizing to those affected by our mistakes,
  and learning from the experience
* Focusing on what is best not just for us as individuals, but for the
  overall community

Examples of unacceptable behavior include:

* The use of sexualized language or imagery, and sexual attention or
  advances of any kind
* Trolling, insulting or derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or email
  address, without their explicit permission
* Other conduct which could reasonably be considered inappropriate in a
  professional setting

## Enforcement Responsibilities

Community leaders are responsible for clarifying and enforcing our standards of
acceptable behavior and will take appropriate and fair corrective action in
response to any behavior that they deem inappropriate, threatening, offensive,
or harmful.

Community leaders have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are
not aligned to this Code of Conduct, and will communicate reasons for moderation
decisions when appropriate.

## Scope

This Code of Conduct applies within all community spaces, and also applies when
an individual is officially representing the community in public spaces.
Examples of representing our community include using an official e-mail address,
posting via an official social media account, or acting as an appointed
representative at an online or offline event.

## Enforcement

Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported to the community leaders responsible for enforcement at
contributor@scroll.io.
All complaints will be reviewed and investigated promptly and fairly.

All community leaders are obligated to respect the privacy and security of the
reporter of any incident.

## Enforcement Guidelines

Community leaders will follow these Community Impact Guidelines in determining
the consequences for any action they deem in violation of this Code of Conduct:

### 1. Correction

**Community Impact**: Use of inappropriate language or other behavior deemed
unprofessional or unwelcome in the community.

**Consequence**: A private, written warning from community leaders, providing
clarity around the nature of the violation and an explanation of why the
behavior was inappropriate. A public apology may be requested.

### 2. Warning

**Community Impact**: A violation through a single incident or series
of actions.

**Consequence**: A warning with consequences for continued behavior. No
interaction with the people involved, including unsolicited interaction with
those enforcing the Code of Conduct, for a specified period of time. This
includes avoiding interactions in community spaces as well as external channels
like social media. Violating these terms may lead to a temporary or
permanent ban.

### 3. Temporary Ban

**Community Impact**: A serious violation of community standards, including
sustained inappropriate behavior.

**Consequence**: A temporary ban from any sort of interaction or public
communication with the community for a specified period of time. No public or
private interaction with the people involved, including unsolicited interaction
with those enforcing the Code of Conduct, is allowed during this period.
Violating these terms may lead to a permanent ban.

### 4. Permanent Ban

**Community Impact**: Demonstrating a pattern of violation of community
standards, including sustained inappropriate behavior, harassment of an
individual, or aggression toward or disparagement of classes of individuals.

**Consequence**: A permanent ban from any sort of public interaction within
the community.

## Attribution

This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 2.0, available at
https://www.contributor-covenant.org/version/2/0/code_of_conduct.html.

Community Impact Guidelines were inspired by [Mozilla's code of conduct
enforcement ladder](https://github.com/mozilla/diversity).

[homepage]: https://www.contributor-covenant.org

For answers to common questions about this code of conduct, see the FAQ at
https://www.contributor-covenant.org/faq. Translations are available at
https://www.contributor-covenant.org/translations.
CONTRIBUTING.md (new file, 42 lines)

@@ -0,0 +1,42 @@
## Contributing

[fork]: /fork
[pr]: /compare
[style]: https://standardjs.com/
[code-of-conduct]: CODE_OF_CONDUCT.md

Hi there! We're thrilled that you'd like to contribute to this project. Your help is essential for keeping it great.

Please note that this project is released with a [Contributor Code of Conduct][code-of-conduct]. By participating in this project you agree to abide by its terms.

## Contribute to Scroll

Did you know there are many ways of contributing to Scroll? If you are looking to contribute by adding Scroll to existing Dev Tools or by doing integrations, please go to the [Contribute to Scroll](https://github.com/scroll-tech/contribute-to-scroll) repo instead. If you are looking to contribute to Scroll's Halo2 zkEVM circuits, please refer to the [zkEVM circuits](https://github.com/scroll-tech/zkevm-circuits) repo. This repository covers the Scroll infrastructure and smart contracts; if you want to contribute to these areas, continue reading this document.

## Issues and PRs

If you have suggestions for how this project could be improved, or want to report a bug, open an issue! We'd love all and any contributions. If you have questions, too, we'd love to hear them.

We'd also love PRs. If you're thinking of a large PR, we advise opening up an issue first to talk about it, though! Look at the links below if you're not sure how to open a PR.

## Submitting a pull request

1. [Fork][fork] and clone the repository.
1. Create a new branch: `git checkout -b my-branch-name`.
1. Make your change, add tests, and make sure the tests still pass.
1. Push to your fork and [submit a pull request][pr].
1. Pat yourself on the back and wait for your pull request to be reviewed and merged.

Here are a few things you can do that will increase the likelihood of your pull request being accepted:

- Write and update tests.
- Keep your changes as focused as possible. If there are multiple changes you would like to make that are not dependent upon each other, consider submitting them as separate pull requests.
- Write a [good commit message](http://tbaggery.com/2008/04/19/a-note-about-git-commit-messages.html).

Work in Progress pull requests are also welcome to get feedback early on, or if there is something blocking you.

## Resources

- [How to Contribute to Open Source](https://opensource.guide/how-to-contribute/)
- [Using Pull Requests](https://help.github.com/articles/about-pull-requests/)
- [GitHub Help](https://help.github.com)
Makefile (20 changed lines)

@@ -1,12 +1,14 @@
.PHONY: check update dev_docker build_test_docker run_test_docker clean

L2GETH_TAG=scroll-v4.3.55

help: ## Display this help message
	@grep -h \
		-E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | \
		awk 'BEGIN {FS = ":.*?## "}; {printf "\033[36m%-30s\033[0m %s\n", $$1, $$2}'

lint: ## The code's format and security checks.
	make -C bridge lint
	make -C rollup lint
	make -C common lint
	make -C coordinator lint
	make -C database lint
@@ -15,15 +17,15 @@ lint: ## The code's format and security checks.

update: ## update dependencies
	go work sync
	cd $(PWD)/bridge/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.34 && go mod tidy
	cd $(PWD)/rollup/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
	cd $(PWD)/bridge-history-api/ && go get -u github.com/ethereum/go-ethereum@latest && go mod tidy
	cd $(PWD)/common/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.34 && go mod tidy
	cd $(PWD)/coordinator/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.34 && go mod tidy
	cd $(PWD)/database/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.34 && go mod tidy
	cd $(PWD)/prover/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.34 && go mod tidy
	cd $(PWD)/prover-stats-api/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.34 && go mod tidy
	cd $(PWD)/tests/integration-test/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.34 && go mod tidy
	goimports -local $(PWD)/bridge/ -w .
	cd $(PWD)/common/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
	cd $(PWD)/coordinator/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
	cd $(PWD)/database/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
	cd $(PWD)/prover/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
	cd $(PWD)/prover-stats-api/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
	cd $(PWD)/tests/integration-test/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
	goimports -local $(PWD)/rollup/ -w .
	goimports -local $(PWD)/bridge-history-api/ -w .
	goimports -local $(PWD)/common/ -w .
	goimports -local $(PWD)/coordinator/ -w .
README.md (40 changed lines)

@@ -1,7 +1,33 @@
# Scroll Monorepo

[](https://github.com/scroll-tech/scroll/actions/workflows/rollup.yml)
[](https://github.com/scroll-tech/scroll/actions/workflows/contracts.yml)
[](https://github.com/scroll-tech/scroll/actions/workflows/bridge_history_api.yml)
[](https://github.com/scroll-tech/scroll/actions/workflows/coordinator.yml)
[](https://github.com/scroll-tech/scroll/actions/workflows/prover.yml)
[](https://github.com/scroll-tech/scroll/actions/workflows/integration.yml)
[](https://codecov.io/gh/scroll-tech/scroll)

<a href="https://scroll.io">Scroll</a> is a zkRollup Layer 2 dedicated to enhancing Ethereum scalability through a bytecode-equivalent [zkEVM](https://github.com/scroll-tech/zkevm-circuits) circuit. This monorepo encompasses essential infrastructure components of the Scroll protocol. It contains the L1 and L2 contracts, the rollup node, the prover client, and the prover coordinator.

## Directory Structure

<pre>
├── <a href="./bridge-history-api/">bridge-history-api</a>: Bridge history service that collects deposit and withdraw events from both L1 and L2 chain and generates withdrawal proofs
├── <a href="./common/">common</a>: Common libraries and types
├── <a href="./coordinator/">coordinator</a>: Prover coordinator service that dispatches proving tasks to provers
├── <a href="./database">database</a>: Database client and schema definition
├── <a href="./src">l2geth</a>: Scroll execution node
├── <a href="./prover">prover</a>: Prover client that runs proof generation for zkEVM circuit and aggregation circuit
├── <a href="./rollup">rollup</a>: Rollup-related services
├── <a href="./rpc-gateway">rpc-gateway</a>: RPC gateway external repo
└── <a href="./tests">tests</a>: Integration tests
</pre>

## Contributing

We welcome community contributions to this repository. Before you submit any issues or PRs, please read the [Code of Conduct](CODE_OF_CONDUCT.md) and the [Contribution Guideline](CONTRIBUTING.md).

## Prerequisites
+ Go 1.19
+ Rust (for version, see [rust-toolchain](./common/libzkp/impl/rust-toolchain))
@@ -15,14 +41,14 @@ docker pull postgres
make dev_docker
```

## Testing Bridge & Coordinator
## Testing Rollup & Coordinator

### For Non-Apple Silicon (M1/M2) Macs

Run the tests using the following commands:

```bash
go test -v -race -covermode=atomic scroll-tech/bridge/...
go test -v -race -covermode=atomic scroll-tech/rollup/...
go test -tags="mock_verifier" -v -race -covermode=atomic scroll-tech/coordinator/...
go test -v -race -covermode=atomic scroll-tech/database/...
go test -v -race -covermode=atomic scroll-tech/common/...
@@ -55,7 +81,7 @@ This command runs a Docker container named `scroll_test_container` from the `scr
Once the Docker container is running, execute the tests using the following commands:

```bash
go test -v -race -covermode=atomic scroll-tech/bridge/...
go test -v -race -covermode=atomic scroll-tech/rollup/...
go test -tags="mock_verifier" -v -race -covermode=atomic scroll-tech/coordinator/...
go test -v -race -covermode=atomic scroll-tech/database/...
go test -v -race -covermode=atomic scroll-tech/common/...
@@ -63,6 +89,10 @@ go test -v -race -covermode=atomic scroll-tech/common/...

## Testing Contracts

You can find the unit tests in [`<REPO_DIR>/contracts/src/test/`](/contracts/src/test/), and integration tests in [`<REPO_DIR>/contracts/integration-test/`](/contracts/integration-test/).
You can find the unit tests in [`contracts/src/test/`](/contracts/src/test/), and integration tests in [`contracts/integration-test/`](/contracts/integration-test/).

For more details on contracts, see [`/contracts`](/contracts).
See [`contracts`](/contracts) for more details on the contracts.

## License

Scroll Monorepo is licensed under the [MIT](./LICENSE) license.
@@ -1 +1,82 @@
# bridge-history-api

This directory contains the `bridge-history-api` service, which provides REST APIs to query txs that interact with Scroll's official bridge contracts.

## Instructions
The bridge-history-api contains three distinct components:

### bridgehistoryapi-db-cli

Provides init, show-version, rollback, and status-check services for the DB.
```
cd ./bridge-history-api
make bridgehistoryapi-db-cli
./build/bin/bridgehistoryapi-db-cli [command]
```

### bridgehistoryapi-cross-msg-fetcher

Fetches the transactions from both L1 and L2.
```
cd ./bridge-history-api
make bridgehistoryapi-cross-msg-fetcher
./build/bin/bridgehistoryapi-cross-msg-fetcher
```

### bridgehistoryapi-server

Provides the REST APIs. Please refer to the API details below.
```
cd ./bridge-history-api
make bridgehistoryapi-server
./build/bin/bridgehistoryapi-server
```

## APIs provided by bridgehistoryapi-server

Assume `bridgehistoryapi-server` is listening on `https://localhost:8080`;
the port can be changed by modifying `config.json`.

1. `/txs`
```
// @Summary get all txs under given address
// @Accept plain
// @Produce plain
// @Param address query string true "wallet address"
// @Param page_size query int true "page size"
// @Param page query int true "page"
// @Success 200
// @Router /api/txs [get]
```

2. `/txsbyhashes`
```
// @Summary get txs by given tx hashes
// @Accept plain
// @Produce plain
// @Param hashes query string array true "array of hashes list"
// @Success 200
// @Router /api/txsbyhashes [post]
```

3. `/claimable`
```
// @Summary get all claimable txs under given address
// @Accept plain
// @Produce plain
// @Param address query string true "wallet address"
// @Param page_size query int true "page size"
// @Param page query int true "page"
// @Success 200
// @Router /api/claimable [get]
```

4. `/withdraw_root`
```
// @Summary get withdraw_root of given batch index
// @Accept plain
// @Produce plain
// @Param batch_index query string true "batch_index"
// @Success 200
// @Router /api/withdraw_root [get]
```
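As a usage illustration for the endpoints listed above, the Go sketch below queries `/api/txs` for a wallet address. The scheme, host, and port are assumptions (the server address comes from `config.json`), the zero address is a placeholder, and the response body is printed raw rather than decoded into a specific schema.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	// Query parameters follow the swagger annotations above:
	// a wallet address plus page and page_size for pagination.
	params := url.Values{}
	params.Set("address", "0x0000000000000000000000000000000000000000") // placeholder address
	params.Set("page", "1")
	params.Set("page_size", "10")

	// Assumed base URL; adjust to match the address configured in config.json.
	resp, err := http.Get("http://localhost:8080/api/txs?" + params.Encode())
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, err := io.ReadAll(resp.Body)
	if err != nil {
		panic(err)
	}
	fmt.Println(resp.Status)
	fmt.Println(string(body)) // JSON payload with the txs for this address
}
```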
@@ -89,9 +89,6 @@ var (
	// L2FailedRelayedMessageEventSignature = keccak256("FailedRelayedMessage(bytes32)")
	L2FailedRelayedMessageEventSignature common.Hash

	// L2ImportBlockEventSignature = keccak256("ImportBlock(bytes32,uint256,uint256,uint256,bytes32)")
	L2ImportBlockEventSignature common.Hash

	// L2AppendMessageEventSignature = keccak256("AppendMessage(uint256,bytes32)")
	L2AppendMessageEventSignature common.Hash
)
@@ -153,8 +150,6 @@ func init() {
	L2RelayedMessageEventSignature = L2ScrollMessengerABI.Events["RelayedMessage"].ID
	L2FailedRelayedMessageEventSignature = L2ScrollMessengerABI.Events["FailedRelayedMessage"].ID

	L2ImportBlockEventSignature = L1BlockContainerABI.Events["ImportBlock"].ID

	L2AppendMessageEventSignature = L2MessageQueueABI.Events["AppendMessage"].ID

}
@@ -95,6 +95,9 @@ func (b *BatchInfoFetcher) fetchBatchInfo() error {
	} else {
		startHeight = latestBatchHeight + 1
	}
	if startHeight < b.batchInfoStartNumber {
		startHeight = b.batchInfoStartNumber
	}
	for from := startHeight; number >= from; from += fetchLimit {
		to := from + fetchLimit - 1
		// number - confirmation can never less than 0 since the for loop condition
@@ -116,7 +116,7 @@ require (
	golang.org/x/arch v0.4.0 // indirect
	golang.org/x/crypto v0.12.0 // indirect
	golang.org/x/exp v0.0.0-20230810033253-352e893a4cad // indirect
	golang.org/x/net v0.12.0 // indirect
	golang.org/x/net v0.14.0 // indirect
	golang.org/x/sync v0.3.0 // indirect
	golang.org/x/sys v0.11.0 // indirect
	golang.org/x/text v0.12.0 // indirect

@@ -531,8 +531,8 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b
golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50=
golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14=
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -24,14 +24,14 @@ func NewBatchController(db *gorm.DB) *BatchController {
func (b *BatchController) GetWithdrawRootByBatchIndex(ctx *gin.Context) {
	var req types.QueryByBatchIndexRequest
	if err := ctx.ShouldBind(&req); err != nil {
		types.RenderJSON(ctx, types.ErrParameterInvalidNo, err, nil)
		types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
		return
	}
	result, err := b.batchLogic.GetWithdrawRootByBatchIndex(ctx, req.BatchIndex)
	if err != nil {
		types.RenderJSON(ctx, types.ErrGetWithdrawRootByBatchIndexFailure, err, nil)
		types.RenderFailure(ctx, types.ErrGetWithdrawRootByBatchIndexFailure, err)
		return
	}

	types.RenderJSON(ctx, types.Success, nil, result)
	types.RenderSuccess(ctx, result)
}
@@ -10,7 +10,12 @@ var (
	// HistoryCtrler is controller instance
	HistoryCtrler *HistoryController
	// BatchCtrler is controller instance
	BatchCtrler *BatchController
	BatchCtrler *BatchController
	// HealthCheck the health check controller
	HealthCheck *HealthCheckController
	// Ready the ready controller
	Ready *ReadyController

	initControllerOnce sync.Once
)

@@ -19,5 +24,7 @@ func InitController(db *gorm.DB) {
	initControllerOnce.Do(func() {
		HistoryCtrler = NewHistoryController(db)
		BatchCtrler = NewBatchController(db)
		HealthCheck = NewHealthCheckController(db)
		Ready = NewReadyController()
	})
}
bridge-history-api/internal/controller/health_check.go (new file, 30 lines)

@@ -0,0 +1,30 @@
package controller

import (
	"github.com/gin-gonic/gin"
	"gorm.io/gorm"

	"bridge-history-api/internal/types"
	"bridge-history-api/utils"
)

// HealthCheckController is health check API
type HealthCheckController struct {
	db *gorm.DB
}

// NewHealthCheckController returns an HealthCheckController instance
func NewHealthCheckController(db *gorm.DB) *HealthCheckController {
	return &HealthCheckController{
		db: db,
	}
}

// HealthCheck the api controller for coordinator health check
func (a *HealthCheckController) HealthCheck(c *gin.Context) {
	if _, err := utils.Ping(a.db); err != nil {
		types.RenderFatal(c, err)
		return
	}
	types.RenderSuccess(c, nil)
}
@@ -25,18 +25,18 @@ func NewHistoryController(db *gorm.DB) *HistoryController {
func (c *HistoryController) GetAllClaimableTxsByAddr(ctx *gin.Context) {
	var req types.QueryByAddressRequest
	if err := ctx.ShouldBind(&req); err != nil {
		types.RenderJSON(ctx, types.ErrParameterInvalidNo, err, nil)
		types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
		return
	}
	offset := (req.Page - 1) * req.PageSize
	limit := req.PageSize
	txs, total, err := c.historyLogic.GetClaimableTxsByAddress(ctx, common.HexToAddress(req.Address), offset, limit)
	if err != nil {
		types.RenderJSON(ctx, types.ErrGetClaimablesFailure, err, nil)
		types.RenderFailure(ctx, types.ErrGetClaimablesFailure, err)
		return
	}

	types.RenderJSON(ctx, types.Success, nil, &types.ResultData{Result: txs, Total: total})
	types.RenderSuccess(ctx, &types.ResultData{Result: txs, Total: total})
}

// GetAllTxsByAddr defines the http get method behavior
@@ -50,23 +50,23 @@ func (c *HistoryController) GetAllTxsByAddr(ctx *gin.Context) {
	limit := req.PageSize
	message, total, err := c.historyLogic.GetTxsByAddress(ctx, common.HexToAddress(req.Address), offset, limit)
	if err != nil {
		types.RenderJSON(ctx, types.ErrGetTxsByAddrFailure, err, nil)
		types.RenderFailure(ctx, types.ErrGetTxsByAddrFailure, err)
		return
	}
	types.RenderJSON(ctx, types.Success, nil, &types.ResultData{Result: message, Total: total})
	types.RenderSuccess(ctx, &types.ResultData{Result: message, Total: total})
}

// PostQueryTxsByHash defines the http post method behavior
func (c *HistoryController) PostQueryTxsByHash(ctx *gin.Context) {
	var req types.QueryByHashRequest
	if err := ctx.ShouldBindJSON(&req); err != nil {
		types.RenderJSON(ctx, types.ErrParameterInvalidNo, err, nil)
		types.RenderFailure(ctx, types.ErrParameterInvalidNo, err)
		return
	}
	result, err := c.historyLogic.GetTxsByHashes(ctx, req.Txs)
	if err != nil {
		types.RenderJSON(ctx, types.ErrGetTxsByHashFailure, err, nil)
		types.RenderFailure(ctx, types.ErrGetTxsByHashFailure, err)
		return
	}
	types.RenderJSON(ctx, types.Success, nil, &types.ResultData{Result: result, Total: 0})
	types.RenderSuccess(ctx, &types.ResultData{Result: result, Total: 0})
}
bridge-history-api/internal/controller/ready.go (new file, 21 lines)

@@ -0,0 +1,21 @@
package controller

import (
	"github.com/gin-gonic/gin"

	"bridge-history-api/internal/types"
)

// ReadyController ready API
type ReadyController struct {
}

// NewReadyController returns an ReadyController instance
func NewReadyController() *ReadyController {
	return &ReadyController{}
}

// Ready the api controller for coordinator ready
func (r *ReadyController) Ready(c *gin.Context) {
	types.RenderSuccess(c, nil)
}
@@ -25,4 +25,6 @@ func Route(router *gin.Engine, conf *config.Config) {
	r.POST("/txsbyhashes", controller.HistoryCtrler.PostQueryTxsByHash)
	r.GET("/claimable", controller.HistoryCtrler.GetAllClaimableTxsByAddr)
	r.GET("/withdraw_root", controller.BatchCtrler.GetWithdrawRootByBatchIndex)
	r.GET("/health", controller.HealthCheck.HealthCheck)
	r.GET("/ready", controller.Ready.Ready)
}
@@ -10,6 +10,8 @@ import (
const (
	// Success shows OK.
	Success = 0
	// InternalServerError shows a fatal error in the server
	InternalServerError = 500
	// ErrParameterInvalidNo is invalid params
	ErrParameterInvalidNo = 40001
	// ErrGetClaimablesFailure is getting all claimables txs error
@@ -103,3 +105,28 @@ func RenderJSON(ctx *gin.Context, errCode int, err error, data interface{}) {
	}
	ctx.JSON(http.StatusOK, renderData)
}

// RenderSuccess renders success response with json
func RenderSuccess(ctx *gin.Context, data interface{}) {
	RenderJSON(ctx, Success, nil, data)
}

// RenderFailure renders failure response with json
func RenderFailure(ctx *gin.Context, errCode int, err error) {
	RenderJSON(ctx, errCode, err, nil)
}

// RenderFatal renders fatal response with json
func RenderFatal(ctx *gin.Context, err error) {
	var errMsg string
	if err != nil {
		errMsg = err.Error()
	}
	renderData := Response{
		ErrCode: InternalServerError,
		ErrMsg:  errMsg,
		Data:    nil,
	}
	ctx.Set("errcode", InternalServerError)
	ctx.JSON(http.StatusInternalServerError, renderData)
}
@@ -2,6 +2,7 @@ package utils

import (
	"context"
	"database/sql"
	"fmt"
	"time"

@@ -67,18 +68,29 @@ func InitDB(config *config.DBConfig) (*gorm.DB, error) {
	if err != nil {
		return nil, err
	}
	sqlDB, err := db.DB()
	if err != nil {
		return nil, err

	sqlDB, pingErr := Ping(db)
	if pingErr != nil {
		return nil, pingErr
	}

	sqlDB.SetMaxOpenConns(config.MaxOpenNum)
	sqlDB.SetMaxIdleConns(config.MaxIdleNum)

	return db, nil
}

// Ping check db status
func Ping(db *gorm.DB) (*sql.DB, error) {
	sqlDB, err := db.DB()
	if err != nil {
		return nil, err
	}

	if err = sqlDB.Ping(); err != nil {
		return nil, err
	}
	return db, nil
	return sqlDB, nil
}

// CloseDB close the db handler. notice the db handler only can close when then program exit.
@@ -1,36 +0,0 @@
# Bridge

This repo contains the Scroll bridge.

In addition, launching the bridge will launch a separate instance of l2geth, and sets up a communication channel
between the two, over JSON-RPC sockets.

Something we should pay attention is that all private keys inside sender instance cannot be duplicated.

## Dependency

+ install `abigen`

``` bash
go install -v github.com/scroll-tech/go-ethereum/cmd/abigen
```

## Build

```bash
make clean
make bridge
```

## Start
* use default ports and config.json

```bash
./build/bin/bridge --http
```

* use specified ports and config.json

```bash
./build/bin/bridge --config ./config.json --http --http.addr localhost --http.port 8290
```
@@ -1,7 +0,0 @@
package main

import "scroll-tech/bridge/cmd/event_watcher/app"

func main() {
	app.Run()
}
@@ -1,7 +0,0 @@
package main

import "scroll-tech/bridge/cmd/gas_oracle/app"

func main() {
	app.Run()
}
@@ -1,93 +0,0 @@
package app

import (
	"context"
	"fmt"
	"os"
	"os/signal"
	"time"

	"github.com/scroll-tech/go-ethereum/log"
	"github.com/urfave/cli/v2"

	"scroll-tech/common/database"
	"scroll-tech/common/metrics"
	"scroll-tech/common/utils"
	"scroll-tech/common/version"

	"scroll-tech/bridge/internal/config"
	"scroll-tech/bridge/internal/controller/relayer"
)

var app *cli.App

func init() {
	// Set up message-relayer app info.
	app = cli.NewApp()
	app.Action = action
	app.Name = "message-relayer"
	app.Usage = "The Scroll Message Relayer"
	app.Description = "Message Relayer contains two main service: 1) relay l1 message to l2. 2) relay l2 message to l1."
	app.Version = version.Version
	app.Flags = append(app.Flags, utils.CommonFlags...)
	app.Commands = []*cli.Command{}
	app.Before = func(ctx *cli.Context) error {
		return utils.LogSetup(ctx)
	}
	// Register `message-relayer-test` app for integration-test.
	utils.RegisterSimulation(app, utils.MessageRelayerApp)
}

func action(ctx *cli.Context) error {
	// Load config file.
	cfgFile := ctx.String(utils.ConfigFileFlag.Name)
	cfg, err := config.NewConfig(cfgFile)
	if err != nil {
		log.Crit("failed to load config file", "config file", cfgFile, "error", err)
	}

	subCtx, cancel := context.WithCancel(ctx.Context)
	// Init db connection
	db, err := database.InitDB(cfg.DBConfig)
	if err != nil {
		log.Crit("failed to init db connection", "err", err)
	}
	defer func() {
		cancel()
		if err = database.CloseDB(db); err != nil {
			log.Error("can not close ormFactory", "error", err)
		}
	}()

	// Start metrics server.
	metrics.Serve(subCtx, ctx)

	l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig)
	if err != nil {
		log.Error("failed to create new l1 relayer", "config file", cfgFile, "error", err)
		return err
	}

	// Start l1relayer process
	go utils.Loop(subCtx, 10*time.Second, l1relayer.ProcessSavedEvents)

	// Finish start all message relayer functions
	log.Info("Start message-relayer successfully")

	// Catch CTRL-C to ensure a graceful shutdown.
	interrupt := make(chan os.Signal, 1)
	signal.Notify(interrupt, os.Interrupt)

	// Wait until the interrupt signal is received from an OS signal.
	<-interrupt

	return nil
}

// Run message_relayer cmd instance.
func Run() {
	if err := app.Run(os.Args); err != nil {
		_, _ = fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
@@ -1,7 +0,0 @@
package main

import "scroll-tech/bridge/cmd/msg_relayer/app"

func main() {
	app.Run()
}
@@ -1,7 +0,0 @@
package main

import "scroll-tech/bridge/cmd/rollup_relayer/app"

func main() {
	app.Run()
}
@@ -1,211 +0,0 @@
|
||||
package watcher
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/types"
|
||||
|
||||
"scroll-tech/bridge/internal/config"
|
||||
"scroll-tech/bridge/internal/orm"
|
||||
)
|
||||
|
||||
// BatchProposer proposes batches based on available unbatched chunks.
|
||||
type BatchProposer struct {
|
||||
ctx context.Context
|
||||
db *gorm.DB
|
||||
|
||||
batchOrm *orm.Batch
|
||||
chunkOrm *orm.Chunk
|
||||
l2BlockOrm *orm.L2Block
|
||||
|
||||
maxChunkNumPerBatch uint64
|
||||
maxL1CommitGasPerBatch uint64
|
||||
maxL1CommitCalldataSizePerBatch uint32
|
||||
minChunkNumPerBatch uint64
|
||||
batchTimeoutSec uint64
|
||||
gasCostIncreaseMultiplier float64
|
||||
}
|
||||
|
||||
// NewBatchProposer creates a new BatchProposer instance.
|
||||
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, db *gorm.DB) *BatchProposer {
|
||||
return &BatchProposer{
|
||||
ctx: ctx,
|
||||
db: db,
|
||||
batchOrm: orm.NewBatch(db),
|
||||
chunkOrm: orm.NewChunk(db),
|
||||
l2BlockOrm: orm.NewL2Block(db),
|
||||
maxChunkNumPerBatch: cfg.MaxChunkNumPerBatch,
|
||||
maxL1CommitGasPerBatch: cfg.MaxL1CommitGasPerBatch,
|
||||
maxL1CommitCalldataSizePerBatch: cfg.MaxL1CommitCalldataSizePerBatch,
|
||||
minChunkNumPerBatch: cfg.MinChunkNumPerBatch,
|
||||
batchTimeoutSec: cfg.BatchTimeoutSec,
|
||||
gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
|
||||
}
|
||||
}
|
||||
|
||||
// TryProposeBatch tries to propose a new batches.
|
||||
func (p *BatchProposer) TryProposeBatch() {
|
||||
dbChunks, err := p.proposeBatchChunks()
|
||||
if err != nil {
|
||||
log.Error("proposeBatchChunks failed", "err", err)
|
||||
return
|
||||
}
|
||||
if err := p.updateBatchInfoInDB(dbChunks); err != nil {
|
||||
log.Error("update batch info in db failed", "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *BatchProposer) updateBatchInfoInDB(dbChunks []*orm.Chunk) error {
|
||||
numChunks := len(dbChunks)
|
||||
if numChunks <= 0 {
|
||||
return nil
|
||||
}
|
||||
chunks, err := p.dbChunksToBridgeChunks(dbChunks)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
startChunkIndex := dbChunks[0].Index
|
||||
startChunkHash := dbChunks[0].Hash
|
||||
endChunkIndex := dbChunks[numChunks-1].Index
|
||||
endChunkHash := dbChunks[numChunks-1].Hash
|
||||
err = p.db.Transaction(func(dbTX *gorm.DB) error {
|
||||
batch, dbErr := p.batchOrm.InsertBatch(p.ctx, startChunkIndex, endChunkIndex, startChunkHash, endChunkHash, chunks, dbTX)
|
||||
if dbErr != nil {
|
||||
return dbErr
|
||||
}
|
||||
dbErr = p.chunkOrm.UpdateBatchHashInRange(p.ctx, startChunkIndex, endChunkIndex, batch.Hash, dbTX)
|
||||
if dbErr != nil {
|
||||
return dbErr
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
|
||||
dbChunks, err := p.chunkOrm.GetUnbatchedChunks(p.ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(dbChunks) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
firstChunk := dbChunks[0]
|
||||
totalL1CommitCalldataSize := firstChunk.TotalL1CommitCalldataSize
|
||||
totalL1CommitGas := firstChunk.TotalL1CommitGas
|
||||
totalChunks := uint64(1)
|
||||
totalL1MessagePopped := firstChunk.TotalL1MessagesPoppedBefore + uint64(firstChunk.TotalL1MessagesPoppedInChunk)
|
||||
|
||||
parentBatch, err := p.batchOrm.GetLatestBatch(p.ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
getKeccakGas := func(size uint64) uint64 {
|
||||
return 30 + 6*((size+31)/32) // 30 + 6 * ceil(size / 32)
|
||||
}
|
||||
|
||||
// Add extra gas costs
|
||||
totalL1CommitGas += 4 * 2100 // 4 one-time cold sload for commitBatch
|
||||
totalL1CommitGas += 20000 // 1 time sstore
|
||||
totalL1CommitGas += 16 // version in calldata
|
||||
totalL1CommitGas += 16 * (32 * (totalL1MessagePopped + 255) / 256) // _skippedL1MessageBitmap in calldata
|
||||
|
||||
// adjusting gas:
|
||||
// add 1 time cold sload (2100 gas) for L1MessageQueue
|
||||
// add 1 time cold address access (2600 gas) for L1MessageQueue
|
||||
// minus 1 time warm sload (100 gas) & 1 time warm address access (100 gas)
|
||||
totalL1CommitGas += (2100 + 2600 - 100 - 100)
|
||||
totalL1CommitGas += getKeccakGas(32 * totalChunks) // batch data hash
|
||||
if parentBatch != nil {
|
||||
totalL1CommitGas += getKeccakGas(uint64(len(parentBatch.BatchHeader))) // parent batch header hash
|
||||
totalL1CommitGas += 16 * uint64(len(parentBatch.BatchHeader)) // parent batch header in calldata
|
||||
}
|
||||
// batch header size: 89 + 32 * ceil(l1MessagePopped / 256)
|
||||
totalL1CommitGas += getKeccakGas(89 + 32*(totalL1MessagePopped+255)/256)
|
||||
|
||||
// Check if the first chunk breaks hard limits.
|
||||
// If so, it indicates there are bugs in chunk-proposer, manual fix is needed.
|
||||
if p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerBatch) {
|
||||
return nil, fmt.Errorf(
|
||||
"the first chunk exceeds l1 commit gas limit; start block number: %v, end block number: %v, commit gas: %v, max commit gas limit: %v",
|
||||
firstChunk.StartBlockNumber,
|
||||
firstChunk.EndBlockNumber,
|
||||
totalL1CommitGas,
|
||||
p.maxL1CommitGasPerBatch,
|
||||
)
|
||||
}
|
||||
|
||||
if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch {
|
||||
return nil, fmt.Errorf(
|
||||
"the first chunk exceeds l1 commit calldata size limit; start block number: %v, end block number %v, calldata size: %v, max calldata size limit: %v",
|
||||
firstChunk.StartBlockNumber,
|
||||
firstChunk.EndBlockNumber,
|
||||
totalL1CommitCalldataSize,
|
||||
p.maxL1CommitCalldataSizePerBatch,
|
||||
)
|
||||
}
|
||||
|
||||
for i, chunk := range dbChunks[1:] {
|
||||
totalL1CommitCalldataSize += chunk.TotalL1CommitCalldataSize
|
||||
totalL1CommitGas += chunk.TotalL1CommitGas
|
||||
// adjust batch data hash gas cost
|
||||
totalL1CommitGas -= getKeccakGas(32 * totalChunks)
|
||||
totalChunks++
|
||||
totalL1CommitGas += getKeccakGas(32 * totalChunks)
|
||||
// adjust batch header hash gas cost
|
||||
totalL1CommitGas -= getKeccakGas(89 + 32*(totalL1MessagePopped+255)/256)
|
||||
totalL1CommitGas -= 16 * (32 * (totalL1MessagePopped + 255) / 256)
|
||||
totalL1MessagePopped += uint64(chunk.TotalL1MessagesPoppedInChunk)
|
||||
totalL1CommitGas += 16 * (32 * (totalL1MessagePopped + 255) / 256)
|
||||
totalL1CommitGas += getKeccakGas(89 + 32*(totalL1MessagePopped+255)/256)
|
||||
if totalChunks > p.maxChunkNumPerBatch ||
|
||||
totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch ||
|
||||
p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerBatch) {
|
||||
return dbChunks[:i+1], nil
|
||||
}
|
||||
}
|
||||
|
||||
var hasChunkTimeout bool
|
||||
currentTimeSec := uint64(time.Now().Unix())
|
||||
if dbChunks[0].StartBlockTime+p.batchTimeoutSec < currentTimeSec {
|
||||
log.Warn("first block timeout",
|
||||
"start block number", dbChunks[0].StartBlockNumber,
|
||||
"first block timestamp", dbChunks[0].StartBlockTime,
|
||||
"chunk outdated time threshold", currentTimeSec,
|
||||
)
|
||||
hasChunkTimeout = true
|
||||
}
|
||||
|
||||
if !hasChunkTimeout && uint64(len(dbChunks)) < p.minChunkNumPerBatch {
|
||||
log.Warn("The chunk number of the batch is less than the minimum limit",
|
||||
"chunk num", len(dbChunks), "minChunkNumPerBatch", p.minChunkNumPerBatch,
|
||||
)
|
||||
return nil, nil
|
||||
}
|
||||
return dbChunks, nil
|
||||
}
|
||||
|
||||
func (p *BatchProposer) dbChunksToBridgeChunks(dbChunks []*orm.Chunk) ([]*types.Chunk, error) {
|
||||
chunks := make([]*types.Chunk, len(dbChunks))
|
||||
for i, c := range dbChunks {
|
||||
wrappedBlocks, err := p.l2BlockOrm.GetL2BlocksInRange(p.ctx, c.StartBlockNumber, c.EndBlockNumber)
|
||||
if err != nil {
|
||||
log.Error("Failed to fetch wrapped blocks",
|
||||
"start number", c.StartBlockNumber, "end number", c.EndBlockNumber, "error", err)
|
||||
return nil, err
|
||||
}
|
||||
chunks[i] = &types.Chunk{
|
||||
Blocks: wrappedBlocks,
|
||||
}
|
||||
}
|
||||
return chunks, nil
|
||||
}
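// Illustrative sketch (not in the original source): one way a caller could drive the
// proposer, assuming a plain time.Ticker loop; the real wiring in the rollup relayer
// command may differ.
//
//	bp := NewBatchProposer(ctx, batchProposerCfg, db) // batchProposerCfg: *config.BatchProposerConfig
//	ticker := time.NewTicker(2 * time.Second)
//	defer ticker.Stop()
//	for {
//		select {
//		case <-ctx.Done():
//			return
//		case <-ticker.C:
//			bp.TryProposeBatch() // proposes at most one new batch per tick
//		}
//	}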
|
||||
@@ -1,72 +0,0 @@
|
||||
package watcher
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"scroll-tech/common/database"
|
||||
"scroll-tech/common/types"
|
||||
|
||||
"scroll-tech/bridge/internal/config"
|
||||
"scroll-tech/bridge/internal/orm"
|
||||
)
|
||||
|
||||
// TODO: Add unit tests that verify the limits are enforced correctly.
|
||||
func testBatchProposer(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
l2BlockOrm := orm.NewL2Block(db)
|
||||
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
|
||||
assert.NoError(t, err)
|
||||
|
||||
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
|
||||
MaxTxGasPerChunk: 1000000000,
|
||||
MaxL2TxNumPerChunk: 10000,
|
||||
MaxL1CommitGasPerChunk: 50000000000,
|
||||
MaxL1CommitCalldataSizePerChunk: 1000000,
|
||||
MinL1CommitCalldataSizePerChunk: 0,
|
||||
MaxRowConsumptionPerChunk: 1048319,
|
||||
ChunkTimeoutSec: 300,
|
||||
}, db)
|
||||
cp.TryProposeChunk()
|
||||
|
||||
bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
|
||||
MaxChunkNumPerBatch: 10,
|
||||
MaxL1CommitGasPerBatch: 50000000000,
|
||||
MaxL1CommitCalldataSizePerBatch: 1000000,
|
||||
MinChunkNumPerBatch: 1,
|
||||
BatchTimeoutSec: 300,
|
||||
}, db)
|
||||
bp.TryProposeBatch()
|
||||
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
chunks, err := chunkOrm.GetUnbatchedChunks(context.Background())
|
||||
assert.NoError(t, err)
|
||||
assert.Empty(t, chunks)
|
||||
|
||||
batchOrm := orm.NewBatch(db)
|
||||
// get all batches.
|
||||
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, batches, 1)
|
||||
assert.Equal(t, uint64(0), batches[0].StartChunkIndex)
|
||||
assert.Equal(t, uint64(0), batches[0].EndChunkIndex)
|
||||
assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus))
|
||||
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus))
|
||||
|
||||
dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 0, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, batches, 1)
|
||||
assert.Equal(t, batches[0].Hash, dbChunks[0].BatchHash)
|
||||
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(dbChunks[0].ProvingStatus))
|
||||
|
||||
blockOrm := orm.NewL2Block(db)
|
||||
blocks, err := blockOrm.GetL2Blocks(context.Background(), map[string]interface{}{}, []string{}, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, blocks, 2)
|
||||
assert.Equal(t, dbChunks[0].Hash, blocks[0].ChunkHash)
|
||||
assert.Equal(t, dbChunks[0].Hash, blocks[1].ChunkHash)
|
||||
}
|
||||
@@ -1,222 +0,0 @@
|
||||
package watcher
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/types"
|
||||
|
||||
"scroll-tech/bridge/internal/config"
|
||||
"scroll-tech/bridge/internal/orm"
|
||||
)
|
||||
|
||||
// chunkRowConsumption is map(sub-circuit name => sub-circuit row count)
|
||||
type chunkRowConsumption map[string]uint64
|
||||
|
||||
// add accumulates row consumption per sub-circuit
|
||||
func (crc *chunkRowConsumption) add(rowConsumption *gethTypes.RowConsumption) error {
|
||||
if rowConsumption == nil {
|
||||
return errors.New("rowConsumption is <nil>")
|
||||
}
|
||||
for _, subCircuit := range *rowConsumption {
|
||||
(*crc)[subCircuit.Name] += subCircuit.RowNumber
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// max finds the maximum row consumption among all sub-circuits
|
||||
func (crc *chunkRowConsumption) max() uint64 {
|
||||
var max uint64
|
||||
for _, value := range *crc {
|
||||
if value > max {
|
||||
max = value
|
||||
}
|
||||
}
|
||||
return max
|
||||
}
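// Illustrative example (not in the original source; sub-circuit names are just examples):
// adding two blocks whose RowConsumption entries are {evm: 100, keccak: 40} and
// {evm: 50, keccak: 90} yields crc = {evm: 150, keccak: 130}, so crc.max() == 150,
// which is the value compared against maxRowConsumptionPerChunk when proposing a chunk.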
|
||||
|
||||
// ChunkProposer proposes chunks based on available unchunked blocks.
|
||||
type ChunkProposer struct {
|
||||
ctx context.Context
|
||||
db *gorm.DB
|
||||
|
||||
chunkOrm *orm.Chunk
|
||||
l2BlockOrm *orm.L2Block
|
||||
|
||||
maxTxGasPerChunk uint64
|
||||
maxL2TxNumPerChunk uint64
|
||||
maxL1CommitGasPerChunk uint64
|
||||
maxL1CommitCalldataSizePerChunk uint64
|
||||
minL1CommitCalldataSizePerChunk uint64
|
||||
maxRowConsumptionPerChunk uint64
|
||||
chunkTimeoutSec uint64
|
||||
gasCostIncreaseMultiplier float64
|
||||
}
|
||||
|
||||
// NewChunkProposer creates a new ChunkProposer instance.
|
||||
func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, db *gorm.DB) *ChunkProposer {
|
||||
return &ChunkProposer{
|
||||
ctx: ctx,
|
||||
db: db,
|
||||
chunkOrm: orm.NewChunk(db),
|
||||
l2BlockOrm: orm.NewL2Block(db),
|
||||
maxTxGasPerChunk: cfg.MaxTxGasPerChunk,
|
||||
maxL2TxNumPerChunk: cfg.MaxL2TxNumPerChunk,
|
||||
maxL1CommitGasPerChunk: cfg.MaxL1CommitGasPerChunk,
|
||||
maxL1CommitCalldataSizePerChunk: cfg.MaxL1CommitCalldataSizePerChunk,
|
||||
minL1CommitCalldataSizePerChunk: cfg.MinL1CommitCalldataSizePerChunk,
|
||||
maxRowConsumptionPerChunk: cfg.MaxRowConsumptionPerChunk,
|
||||
chunkTimeoutSec: cfg.ChunkTimeoutSec,
|
||||
gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
|
||||
}
|
||||
}
|
||||
|
||||
// TryProposeChunk tries to propose a new chunk.
|
||||
func (p *ChunkProposer) TryProposeChunk() {
|
||||
proposedChunk, err := p.proposeChunk()
|
||||
if err != nil {
|
||||
log.Error("propose new chunk failed", "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
if err := p.updateChunkInfoInDB(proposedChunk); err != nil {
|
||||
log.Error("update chunk info in orm failed", "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *ChunkProposer) updateChunkInfoInDB(chunk *types.Chunk) error {
|
||||
if chunk == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
err := p.db.Transaction(func(dbTX *gorm.DB) error {
|
||||
dbChunk, err := p.chunkOrm.InsertChunk(p.ctx, chunk, dbTX)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if err := p.l2BlockOrm.UpdateChunkHashInRange(p.ctx, dbChunk.StartBlockNumber, dbChunk.EndBlockNumber, dbChunk.Hash, dbTX); err != nil {
|
||||
log.Error("failed to update chunk_hash for l2_blocks", "chunk hash", chunk.Hash, "start block", 0, "end block", 0, "err", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
})
|
||||
return err
|
||||
}
|
||||
|
||||
func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
|
||||
blocks, err := p.l2BlockOrm.GetUnchunkedBlocks(p.ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if len(blocks) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
chunk := &types.Chunk{Blocks: blocks[:1]}
|
||||
firstBlock := chunk.Blocks[0]
|
||||
totalTxGasUsed := firstBlock.Header.GasUsed
|
||||
totalL2TxNum := firstBlock.L2TxsNum()
|
||||
totalL1CommitCalldataSize := firstBlock.EstimateL1CommitCalldataSize()
|
||||
crc := chunkRowConsumption{}
|
||||
totalL1CommitGas := chunk.EstimateL1CommitGas()
|
||||
|
||||
if err := crc.add(firstBlock.RowConsumption); err != nil {
|
||||
return nil, fmt.Errorf("chunk-proposer failed to update chunk row consumption: %v", err)
|
||||
}
|
||||
|
||||
// Check if the first block breaks hard limits.
|
||||
// If so, it indicates there are bugs in the sequencer, and a manual fix is needed.
|
||||
if totalL2TxNum > p.maxL2TxNumPerChunk {
|
||||
return nil, fmt.Errorf(
|
||||
"the first block exceeds l2 tx number limit; block number: %v, number of transactions: %v, max transaction number limit: %v",
|
||||
firstBlock.Header.Number,
|
||||
totalL2TxNum,
|
||||
p.maxL2TxNumPerChunk,
|
||||
)
|
||||
}
|
||||
|
||||
if p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerChunk) {
|
||||
return nil, fmt.Errorf(
|
||||
"the first block exceeds l1 commit gas limit; block number: %v, commit gas: %v, max commit gas limit: %v",
|
||||
firstBlock.Header.Number,
|
||||
totalL1CommitGas,
|
||||
p.maxL1CommitGasPerChunk,
|
||||
)
|
||||
}
|
||||
|
||||
if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk {
|
||||
return nil, fmt.Errorf(
|
||||
"the first block exceeds l1 commit calldata size limit; block number: %v, calldata size: %v, max calldata size limit: %v",
|
||||
firstBlock.Header.Number,
|
||||
totalL1CommitCalldataSize,
|
||||
p.maxL1CommitCalldataSizePerChunk,
|
||||
)
|
||||
}
|
||||
|
||||
// Check if the first block breaks any soft limits.
|
||||
if totalTxGasUsed > p.maxTxGasPerChunk {
|
||||
log.Warn(
|
||||
"The first block in chunk exceeds l2 tx gas limit",
|
||||
"block number", firstBlock.Header.Number,
|
||||
"gas used", totalTxGasUsed,
|
||||
"max gas limit", p.maxTxGasPerChunk,
|
||||
)
|
||||
}
|
||||
if max := crc.max(); max > p.maxRowConsumptionPerChunk {
|
||||
return nil, fmt.Errorf(
|
||||
"the first block exceeds row consumption limit; block number: %v, row consumption: %v, max: %v, limit: %v",
|
||||
firstBlock.Header.Number,
|
||||
crc,
|
||||
max,
|
||||
p.maxRowConsumptionPerChunk,
|
||||
)
|
||||
}
|
||||
|
||||
for _, block := range blocks[1:] {
|
||||
chunk.Blocks = append(chunk.Blocks, block)
|
||||
totalTxGasUsed += block.Header.GasUsed
|
||||
totalL2TxNum += block.L2TxsNum()
|
||||
totalL1CommitCalldataSize += block.EstimateL1CommitCalldataSize()
|
||||
totalL1CommitGas = chunk.EstimateL1CommitGas()
|
||||
|
||||
if err := crc.add(block.RowConsumption); err != nil {
|
||||
return nil, fmt.Errorf("chunk-proposer failed to update chunk row consumption: %v", err)
|
||||
}
|
||||
|
||||
if totalTxGasUsed > p.maxTxGasPerChunk ||
|
||||
totalL2TxNum > p.maxL2TxNumPerChunk ||
|
||||
totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk ||
|
||||
p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerChunk) ||
|
||||
crc.max() > p.maxRowConsumptionPerChunk {
|
||||
chunk.Blocks = chunk.Blocks[:len(chunk.Blocks)-1] // remove the last block from chunk
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
var hasBlockTimeout bool
|
||||
currentTimeSec := uint64(time.Now().Unix())
|
||||
if blocks[0].Header.Time+p.chunkTimeoutSec < currentTimeSec {
|
||||
log.Warn("first block timeout",
|
||||
"block number", blocks[0].Header.Number,
|
||||
"block timestamp", blocks[0].Header.Time,
|
||||
"block outdated time threshold", currentTimeSec,
|
||||
)
|
||||
hasBlockTimeout = true
|
||||
}
|
||||
|
||||
if !hasBlockTimeout && totalL1CommitCalldataSize < p.minL1CommitCalldataSizePerChunk {
|
||||
log.Warn("The calldata size of the chunk is less than the minimum limit",
|
||||
"totalL1CommitCalldataSize", totalL1CommitCalldataSize,
|
||||
"minL1CommitCalldataSizePerChunk", p.minL1CommitCalldataSizePerChunk,
|
||||
)
|
||||
return nil, nil
|
||||
}
|
||||
return chunk, nil
|
||||
}
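// Illustrative note (not in the original source): the loop above packs blocks greedily.
// For example, with maxL2TxNumPerChunk = 100 and pending blocks carrying 60, 30 and 40
// L2 txs, the third block is appended, the running total of 130 exceeds the limit, so
// that block is removed again and the proposed chunk keeps only the first two blocks.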
|
||||
@@ -1,72 +0,0 @@
|
||||
package watcher
|
||||
|
||||
import (
|
||||
"context"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"scroll-tech/common/database"
|
||||
"scroll-tech/common/types"
|
||||
|
||||
"scroll-tech/bridge/internal/config"
|
||||
"scroll-tech/bridge/internal/orm"
|
||||
)
|
||||
|
||||
// TODO: Add unit tests that verify the limits are enforced correctly.
|
||||
func testChunkProposer(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
l2BlockOrm := orm.NewL2Block(db)
|
||||
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
|
||||
assert.NoError(t, err)
|
||||
|
||||
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
|
||||
MaxTxGasPerChunk: 1000000000,
|
||||
MaxL2TxNumPerChunk: 10000,
|
||||
MaxL1CommitGasPerChunk: 50000000000,
|
||||
MaxL1CommitCalldataSizePerChunk: 1000000,
|
||||
MinL1CommitCalldataSizePerChunk: 0,
|
||||
MaxRowConsumptionPerChunk: 1048319,
|
||||
ChunkTimeoutSec: 300,
|
||||
}, db)
|
||||
cp.TryProposeChunk()
|
||||
|
||||
expectedChunk := &types.Chunk{
|
||||
Blocks: []*types.WrappedBlock{wrappedBlock1, wrappedBlock2},
|
||||
}
|
||||
expectedHash, err := expectedChunk.Hash(0)
|
||||
assert.NoError(t, err)
|
||||
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
chunks, err := chunkOrm.GetUnbatchedChunks(context.Background())
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, chunks, 1)
|
||||
assert.Equal(t, expectedHash.Hex(), chunks[0].Hash)
|
||||
}
|
||||
|
||||
func testChunkProposerRowConsumption(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
l2BlockOrm := orm.NewL2Block(db)
|
||||
err := l2BlockOrm.InsertL2Blocks(context.Background(), []*types.WrappedBlock{wrappedBlock1, wrappedBlock2})
|
||||
assert.NoError(t, err)
|
||||
|
||||
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
|
||||
MaxTxGasPerChunk: 1000000000,
|
||||
MaxL2TxNumPerChunk: 10000,
|
||||
MaxL1CommitGasPerChunk: 50000000000,
|
||||
MaxL1CommitCalldataSizePerChunk: 1000000,
|
||||
MinL1CommitCalldataSizePerChunk: 0,
|
||||
MaxRowConsumptionPerChunk: 0, // !
|
||||
ChunkTimeoutSec: 300,
|
||||
}, db)
|
||||
cp.TryProposeChunk()
|
||||
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
chunks, err := chunkOrm.GetUnbatchedChunks(context.Background())
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, chunks, 0)
|
||||
}
|
||||
@@ -1,81 +0,0 @@
|
||||
package tests
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
geth_types "github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"scroll-tech/common/database"
|
||||
"scroll-tech/common/types"
|
||||
|
||||
"scroll-tech/bridge/internal/controller/relayer"
|
||||
"scroll-tech/bridge/internal/controller/watcher"
|
||||
"scroll-tech/bridge/internal/orm"
|
||||
)
|
||||
|
||||
func testRelayL1MessageSucceed(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
prepareContracts(t)
|
||||
|
||||
l1Cfg := bridgeApp.Config.L1Config
|
||||
l2Cfg := bridgeApp.Config.L2Config
|
||||
|
||||
// Create L1Relayer
|
||||
l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
|
||||
assert.NoError(t, err)
|
||||
// Create L1Watcher
|
||||
confirmations := rpc.LatestBlockNumber
|
||||
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
|
||||
|
||||
// Create L2Watcher
|
||||
l2Watcher := watcher.NewL2WatcherClient(context.Background(), l2Client, confirmations, l2Cfg.L2MessengerAddress, l2Cfg.L2MessageQueueAddress, l2Cfg.WithdrawTrieRootSlot, db)
|
||||
|
||||
// send message through l1 messenger contract
|
||||
nonce, err := l1MessengerInstance.MessageNonce(&bind.CallOpts{})
|
||||
assert.NoError(t, err)
|
||||
sendTx, err := l1MessengerInstance.SendMessage(l1Auth, l2Auth.From, big.NewInt(0), common.Hex2Bytes("00112233"), big.NewInt(0))
|
||||
assert.NoError(t, err)
|
||||
sendReceipt, err := bind.WaitMined(context.Background(), l1Client, sendTx)
|
||||
assert.NoError(t, err)
|
||||
if sendReceipt.Status != geth_types.ReceiptStatusSuccessful || err != nil {
|
||||
t.Fatalf("Call failed")
|
||||
}
|
||||
|
||||
// l1 watch process events
|
||||
l1Watcher.FetchContractEvent()
|
||||
|
||||
l1MessageOrm := orm.NewL1Message(db)
|
||||
// check db status
|
||||
msg, err := l1MessageOrm.GetL1MessageByQueueIndex(nonce.Uint64())
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, types.MsgStatus(msg.Status), types.MsgPending)
|
||||
assert.Equal(t, msg.Target, l2Auth.From.String())
|
||||
|
||||
// process l1 messages
|
||||
l1Relayer.ProcessSavedEvents()
|
||||
|
||||
l1Message, err := l1MessageOrm.GetL1MessageByQueueIndex(nonce.Uint64())
|
||||
assert.NoError(t, err)
|
||||
assert.NotEmpty(t, l1Message.Layer2Hash)
|
||||
assert.Equal(t, types.MsgStatus(l1Message.Status), types.MsgSubmitted)
|
||||
|
||||
relayTx, _, err := l2Client.TransactionByHash(context.Background(), common.HexToHash(l1Message.Layer2Hash))
|
||||
assert.NoError(t, err)
|
||||
relayTxReceipt, err := bind.WaitMined(context.Background(), l2Client, relayTx)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, len(relayTxReceipt.Logs), 1)
|
||||
|
||||
// fetch message relayed events
|
||||
l2Watcher.FetchContractEvent()
|
||||
msg, err = l1MessageOrm.GetL1MessageByQueueIndex(nonce.Uint64())
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, types.MsgStatus(msg.Status), types.MsgConfirmed)
|
||||
}
|
||||
@@ -20,11 +20,10 @@ RUN find ./ | grep libzktrie.so | xargs -I{} cp {} /app/target/release/
|
||||
FROM scrolltech/go-rust-builder:go-1.19-rust-nightly-2022-12-10 as base
|
||||
WORKDIR /src
|
||||
COPY go.work* ./
|
||||
COPY ./bridge/go.* ./bridge/
|
||||
COPY ./rollup/go.* ./rollup/
|
||||
COPY ./common/go.* ./common/
|
||||
COPY ./coordinator/go.* ./coordinator/
|
||||
COPY ./database/go.* ./database/
|
||||
COPY ./prover-stats-api/go.* ./prover-stats-api/
|
||||
COPY ./prover/go.* ./prover/
|
||||
COPY ./tests/integration-test/go.* ./tests/integration-test/
|
||||
COPY ./bridge-history-api/go.* ./bridge-history-api/
|
||||
|
||||
@@ -3,11 +3,10 @@ FROM scrolltech/go-alpine-builder:1.19 as base
|
||||
|
||||
WORKDIR /src
|
||||
COPY go.work* ./
|
||||
COPY ./bridge/go.* ./bridge/
|
||||
COPY ./rollup/go.* ./rollup/
|
||||
COPY ./common/go.* ./common/
|
||||
COPY ./coordinator/go.* ./coordinator/
|
||||
COPY ./database/go.* ./database/
|
||||
COPY ./prover-stats-api/go.* ./prover-stats-api/
|
||||
COPY ./prover/go.* ./prover/
|
||||
COPY ./tests/integration-test/go.* ./tests/integration-test/
|
||||
COPY ./bridge-history-api/go.* ./bridge-history-api/
|
||||
|
||||
@@ -3,11 +3,10 @@ FROM scrolltech/go-alpine-builder:1.19 as base
|
||||
|
||||
WORKDIR /src
|
||||
COPY go.work* ./
|
||||
COPY ./bridge/go.* ./bridge/
|
||||
COPY ./rollup/go.* ./rollup/
|
||||
COPY ./common/go.* ./common/
|
||||
COPY ./coordinator/go.* ./coordinator/
|
||||
COPY ./database/go.* ./database/
|
||||
COPY ./prover-stats-api/go.* ./prover-stats-api/
|
||||
COPY ./prover/go.* ./prover/
|
||||
COPY ./tests/integration-test/go.* ./tests/integration-test/
|
||||
COPY ./bridge-history-api/go.* ./bridge-history-api/
|
||||
@@ -18,7 +17,7 @@ FROM base as builder
|
||||
|
||||
RUN --mount=target=. \
|
||||
--mount=type=cache,target=/root/.cache/go-build \
|
||||
cd /src/bridge/cmd/event_watcher/ && go build -v -p 4 -o /bin/event_watcher
|
||||
cd /src/rollup/cmd/event_watcher/ && go build -v -p 4 -o /bin/event_watcher
|
||||
|
||||
# Pull event_watcher into a second stage deploy alpine container
|
||||
FROM alpine:latest
|
||||
|
||||
@@ -3,11 +3,10 @@ FROM scrolltech/go-alpine-builder:1.19 as base
|
||||
|
||||
WORKDIR /src
|
||||
COPY go.work* ./
|
||||
COPY ./bridge/go.* ./bridge/
|
||||
COPY ./rollup/go.* ./rollup/
|
||||
COPY ./common/go.* ./common/
|
||||
COPY ./coordinator/go.* ./coordinator/
|
||||
COPY ./database/go.* ./database/
|
||||
COPY ./prover-stats-api/go.* ./prover-stats-api/
|
||||
COPY ./prover/go.* ./prover/
|
||||
COPY ./tests/integration-test/go.* ./tests/integration-test/
|
||||
COPY ./bridge-history-api/go.* ./bridge-history-api/
|
||||
@@ -18,7 +17,7 @@ FROM base as builder
|
||||
|
||||
RUN --mount=target=. \
|
||||
--mount=type=cache,target=/root/.cache/go-build \
|
||||
cd /src/bridge/cmd/gas_oracle/ && go build -v -p 4 -o /bin/gas_oracle
|
||||
cd /src/rollup/cmd/gas_oracle/ && go build -v -p 4 -o /bin/gas_oracle
|
||||
|
||||
# Pull gas_oracle into a second stage deploy alpine container
|
||||
FROM alpine:latest
|
||||
|
||||
@@ -1,28 +0,0 @@
|
||||
# Download Go dependencies
|
||||
FROM scrolltech/go-alpine-builder:1.19 as base
|
||||
|
||||
WORKDIR /src
|
||||
COPY go.work* ./
|
||||
COPY ./bridge/go.* ./bridge/
|
||||
COPY ./common/go.* ./common/
|
||||
COPY ./coordinator/go.* ./coordinator/
|
||||
COPY ./database/go.* ./database/
|
||||
COPY ./prover-stats-api/go.* ./prover-stats-api/
|
||||
COPY ./prover/go.* ./prover/
|
||||
COPY ./tests/integration-test/go.* ./tests/integration-test/
|
||||
COPY ./bridge-history-api/go.* ./bridge-history-api/
|
||||
RUN go mod download -x
|
||||
|
||||
# Build msg_relayer
|
||||
FROM base as builder
|
||||
|
||||
RUN --mount=target=. \
|
||||
--mount=type=cache,target=/root/.cache/go-build \
|
||||
cd /src/bridge/cmd/msg_relayer/ && go build -v -p 4 -o /bin/msg_relayer
|
||||
|
||||
# Pull msg_relayer into a second stage deploy alpine container
|
||||
FROM alpine:latest
|
||||
|
||||
COPY --from=builder /bin/msg_relayer /bin/
|
||||
|
||||
ENTRYPOINT ["msg_relayer"]
|
||||
@@ -1,5 +0,0 @@
|
||||
assets/
|
||||
docs/
|
||||
l2geth/
|
||||
rpc-gateway/
|
||||
*target/*
|
||||
@@ -1,31 +0,0 @@
|
||||
# Download Go dependencies
|
||||
FROM scrolltech/go-alpine-builder:1.19 as base
|
||||
|
||||
WORKDIR /src
|
||||
COPY go.work* ./
|
||||
COPY ./bridge/go.* ./bridge/
|
||||
COPY ./common/go.* ./common/
|
||||
COPY ./coordinator/go.* ./coordinator/
|
||||
COPY ./database/go.* ./database/
|
||||
COPY ./prover-stats-api/go.* ./prover-stats-api/
|
||||
COPY ./prover/go.* ./prover/
|
||||
COPY ./tests/integration-test/go.* ./tests/integration-test/
|
||||
COPY ./bridge-history-api/go.* ./bridge-history-api/
|
||||
# Support mainland environment.
|
||||
#ENV GOPROXY="https://goproxy.cn,direct"
|
||||
RUN go mod download -x
|
||||
|
||||
|
||||
# Build prover-stats-api
|
||||
FROM base as builder
|
||||
|
||||
RUN --mount=target=. \
|
||||
--mount=type=cache,target=/root/.cache/go-build \
|
||||
cd /src/prover-stats-api/cmd/ && go build -v -p 4 -o /bin/prover-stats-api
|
||||
|
||||
# Pull prover-stats-api into a second stage deploy alpine container \
|
||||
FROM alpine:latest
|
||||
|
||||
COPY --from=builder /bin/prover-stats-api /bin/
|
||||
|
||||
ENTRYPOINT ["prover-stats-api"]
|
||||
@@ -3,11 +3,10 @@ FROM scrolltech/go-alpine-builder:1.19 as base
|
||||
|
||||
WORKDIR /src
|
||||
COPY go.work* ./
|
||||
COPY ./bridge/go.* ./bridge/
|
||||
COPY ./rollup/go.* ./rollup/
|
||||
COPY ./common/go.* ./common/
|
||||
COPY ./coordinator/go.* ./coordinator/
|
||||
COPY ./database/go.* ./database/
|
||||
COPY ./prover-stats-api/go.* ./prover-stats-api/
|
||||
COPY ./prover/go.* ./prover/
|
||||
COPY ./tests/integration-test/go.* ./tests/integration-test/
|
||||
COPY ./bridge-history-api/go.* ./bridge-history-api/
|
||||
@@ -18,7 +17,7 @@ FROM base as builder
|
||||
|
||||
RUN --mount=target=. \
|
||||
--mount=type=cache,target=/root/.cache/go-build \
|
||||
cd /src/bridge/cmd/rollup_relayer/ && go build -v -p 4 -o /bin/rollup_relayer
|
||||
cd /src/rollup/cmd/rollup_relayer/ && go build -v -p 4 -o /bin/rollup_relayer
|
||||
|
||||
# Pull rollup_relayer into a second stage deploy alpine container
|
||||
FROM alpine:latest
|
||||
|
||||
@@ -3,7 +3,7 @@ set -uex

profile_name=$1

exclude_dirs=("scroll-tech/bridge/cmd" "scroll-tech/bridge/tests" "scroll-tech/bridge/mock_bridge" "scroll-tech/coordinator/cmd" "scroll-tech/coordinator/internal/logic/verifier")
exclude_dirs=("scroll-tech/rollup/cmd" "scroll-tech/rollup/tests" "scroll-tech/rollup/mock_bridge" "scroll-tech/coordinator/cmd" "scroll-tech/coordinator/internal/logic/verifier")

all_packages=$(go list ./... | grep -v "^scroll-tech/${profile_name}$")
coverpkg="scroll-tech/${profile_name}"

@@ -6,7 +6,7 @@ flag_management:
default_rules:
carryforward: true
individual_flags:
- name: bridge
- name: rollup
statuses:
- type: project
target: auto

@@ -5,8 +5,9 @@ import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/docker/docker/pkg/reexec"
|
||||
cmap "github.com/orcaman/concurrent-map"
|
||||
)
|
||||
|
||||
@@ -26,8 +27,9 @@ type Cmd struct {
|
||||
name string
|
||||
args []string
|
||||
|
||||
mu sync.Mutex
|
||||
cmd *exec.Cmd
|
||||
isRunning uint64
|
||||
cmd *exec.Cmd
|
||||
app *exec.Cmd
|
||||
|
||||
checkFuncs cmap.ConcurrentMap //map[string]checkFunc
|
||||
|
||||
@@ -38,13 +40,23 @@ type Cmd struct {
|
||||
}
|
||||
|
||||
// NewCmd creates a Cmd instance.
|
||||
func NewCmd(name string, args ...string) *Cmd {
|
||||
return &Cmd{
|
||||
func NewCmd(name string, params ...string) *Cmd {
|
||||
cmd := &Cmd{
|
||||
checkFuncs: cmap.New(),
|
||||
name: name,
|
||||
args: args,
|
||||
args: params,
|
||||
ErrChan: make(chan error, 10),
|
||||
cmd: exec.Command(name, params...),
|
||||
app: &exec.Cmd{
|
||||
Path: reexec.Self(),
|
||||
Args: append([]string{name}, params...),
|
||||
},
|
||||
}
|
||||
cmd.cmd.Stdout = cmd
|
||||
cmd.cmd.Stderr = cmd
|
||||
cmd.app.Stdout = cmd
|
||||
cmd.app.Stderr = cmd
|
||||
return cmd
|
||||
}
|
||||
|
||||
// RegistFunc registers a check func.
|
||||
@@ -58,15 +70,14 @@ func (c *Cmd) UnRegistFunc(key string) {
|
||||
}
|
||||
|
||||
func (c *Cmd) runCmd() {
|
||||
cmd := exec.Command(c.args[0], c.args[1:]...) //nolint:gosec
|
||||
cmd.Stdout = c
|
||||
cmd.Stderr = c
|
||||
c.ErrChan <- cmd.Run()
|
||||
fmt.Println("cmd:", append([]string{c.name}, c.args...))
|
||||
if atomic.CompareAndSwapUint64(&c.isRunning, 0, 1) {
|
||||
c.ErrChan <- c.cmd.Run()
|
||||
}
|
||||
}
|
||||
|
||||
// RunCmd parallel running when parallel is true.
|
||||
func (c *Cmd) RunCmd(parallel bool) {
|
||||
fmt.Println("cmd:", c.args)
|
||||
if parallel {
|
||||
go c.runCmd()
|
||||
} else {
|
||||
|
||||
@@ -3,41 +3,45 @@ package cmd
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"syscall"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/pkg/reexec"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// IsRunning reports whether the command has started (isRunning: 1 = started, 0 = not started).
|
||||
func (c *Cmd) IsRunning() bool {
|
||||
return atomic.LoadUint64(&c.isRunning) == 1
|
||||
}
|
||||
|
||||
func (c *Cmd) runApp() {
|
||||
fmt.Println("cmd:", append([]string{c.name}, c.args...))
|
||||
if atomic.CompareAndSwapUint64(&c.isRunning, 0, 1) {
|
||||
c.ErrChan <- c.app.Run()
|
||||
}
|
||||
}
|
||||
|
||||
// RunApp exec's the current binary using name as argv[0] which will trigger the
|
||||
// reexec init function for that name (e.g. "geth-test" in cmd/geth/run_test.go)
|
||||
func (c *Cmd) RunApp(waitResult func() bool) {
|
||||
fmt.Println("cmd: ", append([]string{c.name}, c.args...))
|
||||
cmd := &exec.Cmd{
|
||||
Path: reexec.Self(),
|
||||
Args: append([]string{c.name}, c.args...),
|
||||
Stderr: c,
|
||||
Stdout: c,
|
||||
}
|
||||
if waitResult != nil {
|
||||
go func() {
|
||||
_ = cmd.Run()
|
||||
c.runApp()
|
||||
}()
|
||||
waitResult()
|
||||
} else {
|
||||
_ = cmd.Run()
|
||||
c.runApp()
|
||||
}
|
||||
|
||||
c.mu.Lock()
|
||||
c.cmd = cmd
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
// WaitExit waits until the process exits.
|
||||
func (c *Cmd) WaitExit() {
|
||||
if atomic.LoadUint64(&c.isRunning) == 0 {
|
||||
return
|
||||
}
|
||||
// Wait until all the check functions have finished; interrupt the loop when an error appears.
|
||||
var err error
|
||||
for err == nil && !c.checkFuncs.IsEmpty() {
|
||||
@@ -52,20 +56,18 @@ func (c *Cmd) WaitExit() {
|
||||
}
|
||||
|
||||
// Send interrupt signal.
|
||||
c.mu.Lock()
|
||||
_ = c.cmd.Process.Signal(os.Interrupt)
|
||||
// should use `_ = c.cmd.Process.Wait()` here, but we have some bugs in coordinator's graceful exit,
|
||||
_ = c.app.Process.Signal(os.Interrupt)
|
||||
// should use `_ = c.app.Process.Wait()` here, but we have some bugs in coordinator's graceful exit,
|
||||
// so we use `Kill` as a temp workaround. And since `WaitExit` is only used in integration tests,
|
||||
// it won't really affect our functionalities.
|
||||
_ = c.cmd.Process.Kill()
|
||||
c.mu.Unlock()
|
||||
if err = c.app.Process.Signal(syscall.SIGTERM); err != nil {
|
||||
_ = c.app.Process.Kill()
|
||||
}
|
||||
}
|
||||
|
||||
// Interrupt sends an interrupt signal.
|
||||
func (c *Cmd) Interrupt() {
|
||||
c.mu.Lock()
|
||||
c.ErrChan <- c.cmd.Process.Signal(os.Interrupt)
|
||||
c.mu.Unlock()
|
||||
c.ErrChan <- c.app.Process.Signal(os.Interrupt)
|
||||
}
|
||||
|
||||
// WaitResult returns true when the keyword appears before the timeout.
|
||||
|
||||
@@ -12,7 +12,7 @@ import (
)

func TestCmd(t *testing.T) {
app := cmd.NewCmd("curTime", "date", "+%Y-%m-%d")
app := cmd.NewCmd("date", "+%Y-%m-%d")

tm := time.Now()
curTime := fmt.Sprintf("%d-%02d-%02d", tm.Year(), tm.Month(), tm.Day())

@@ -2,6 +2,7 @@ package database
|
||||
|
||||
import (
|
||||
"context"
|
||||
"database/sql"
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
@@ -63,17 +64,15 @@ func InitDB(config *Config) (*gorm.DB, error) {
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
sqlDB, err := db.DB()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
|
||||
sqlDB, pingErr := Ping(db)
|
||||
if pingErr != nil {
|
||||
return nil, pingErr
|
||||
}
|
||||
|
||||
sqlDB.SetMaxOpenConns(config.MaxOpenNum)
|
||||
sqlDB.SetMaxIdleConns(config.MaxIdleNum)
|
||||
|
||||
if err = sqlDB.Ping(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return db, nil
|
||||
}
|
||||
|
||||
@@ -88,3 +87,16 @@ func CloseDB(db *gorm.DB) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Ping checks the db status and returns the underlying *sql.DB.
|
||||
func Ping(db *gorm.DB) (*sql.DB, error) {
|
||||
sqlDB, err := db.DB()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err = sqlDB.Ping(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return sqlDB, nil
|
||||
}
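// Illustrative usage from a caller (not in the original source): Ping is what the
// HTTP probes build on, e.g.
//
//	if _, err := database.Ping(gormDB); err != nil {
//		// report the instance as unhealthy
//	}
//
// which is what ProbesController.HealthCheck in common/observability/probes.go does.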
|
||||
|
||||
@@ -11,6 +11,10 @@ import (
|
||||
"github.com/mattn/go-colorable"
|
||||
"github.com/mattn/go-isatty"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"scroll-tech/common/docker"
|
||||
"scroll-tech/common/version"
|
||||
)
|
||||
|
||||
func TestGormLogger(t *testing.T) {
|
||||
@@ -33,3 +37,26 @@ func TestGormLogger(t *testing.T) {
|
||||
gl.Info(context.Background(), "test %s warn:%v", "testInfo", errors.New("test info"))
|
||||
gl.Trace(context.Background(), time.Now(), func() (string, int64) { return "test trace", 1 }, nil)
|
||||
}
|
||||
|
||||
func TestDB(t *testing.T) {
|
||||
version.Version = "v4.1.98-aaa-bbb-ccc"
|
||||
base := docker.NewDockerApp()
|
||||
base.RunDBImage(t)
|
||||
|
||||
dbCfg := &Config{
|
||||
DSN: base.DBConfig.DSN,
|
||||
DriverName: base.DBConfig.DriverName,
|
||||
MaxOpenNum: base.DBConfig.MaxOpenNum,
|
||||
MaxIdleNum: base.DBConfig.MaxIdleNum,
|
||||
}
|
||||
|
||||
var err error
|
||||
db, err := InitDB(dbCfg)
|
||||
assert.NoError(t, err)
|
||||
|
||||
sqlDB, err := Ping(db)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, sqlDB)
|
||||
|
||||
assert.NoError(t, CloseDB(db))
|
||||
}
|
||||
|
||||
@@ -27,6 +27,7 @@ var (
|
||||
|
||||
// AppAPI app interface.
|
||||
type AppAPI interface {
|
||||
IsRunning() bool
|
||||
WaitResult(t *testing.T, timeout time.Duration, keyword string) bool
|
||||
RunApp(waitResult func() bool)
|
||||
WaitExit()
|
||||
|
||||
@@ -36,7 +36,7 @@ func NewImgDB(image, password, dbName string, port int) ImgInstance {
|
||||
dbName: dbName,
|
||||
port: port,
|
||||
}
|
||||
img.cmd = cmd.NewCmd(img.name, img.prepare()...)
|
||||
img.cmd = cmd.NewCmd("docker", img.prepare()...)
|
||||
return img
|
||||
}
|
||||
|
||||
@@ -89,7 +89,7 @@ func (i *ImgDB) IsRunning() bool {
|
||||
}
|
||||
|
||||
func (i *ImgDB) prepare() []string {
|
||||
cmd := []string{"docker", "run", "--rm", "--name", i.name, "-p", fmt.Sprintf("%d:5432", i.port)}
|
||||
cmd := []string{"run", "--rm", "--name", i.name, "-p", fmt.Sprintf("%d:5432", i.port)}
|
||||
envs := []string{
|
||||
"-e", "POSTGRES_PASSWORD=" + i.password,
|
||||
"-e", fmt.Sprintf("POSTGRES_DB=%s", i.dbName),
|
||||
|
||||
@@ -42,7 +42,7 @@ func NewImgGeth(image, volume, ipc string, hPort, wPort int) GethImgInstance {
|
||||
httpPort: hPort,
|
||||
wsPort: wPort,
|
||||
}
|
||||
img.cmd = cmd.NewCmd(img.name, img.prepare()...)
|
||||
img.cmd = cmd.NewCmd("docker", img.params()...)
|
||||
return img
|
||||
}
|
||||
|
||||
@@ -149,8 +149,8 @@ func (i *ImgGeth) Stop() error {
|
||||
return cli.ContainerRemove(ctx, i.id, types.ContainerRemoveOptions{})
|
||||
}
|
||||
|
||||
func (i *ImgGeth) prepare() []string {
|
||||
cmds := []string{"docker", "run", "--rm", "--name", i.name}
|
||||
func (i *ImgGeth) params() []string {
|
||||
cmds := []string{"run", "--rm", "--name", i.name}
|
||||
var ports []string
|
||||
if i.httpPort != 0 {
|
||||
ports = append(ports, []string{"-p", strconv.Itoa(i.httpPort) + ":8545"}...)
|
||||
|
||||
@@ -1,4 +1,4 @@
FROM scrolltech/l2geth:scroll-v4.3.34
FROM scrolltech/l2geth:scroll-v4.3.55

RUN mkdir -p /l2geth/keystore


@@ -14,7 +14,7 @@ require (
github.com/modern-go/reflect2 v1.0.2
github.com/orcaman/concurrent-map v1.0.0
github.com/prometheus/client_golang v1.14.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20230812030736-25fe3ba69a28
github.com/scroll-tech/go-ethereum v1.10.14-0.20230829000527-f883dcdc21fc
github.com/stretchr/testify v1.8.3
github.com/urfave/cli/v2 v2.25.7
gorm.io/driver/postgres v1.5.0
@@ -117,7 +117,7 @@ require (
golang.org/x/arch v0.4.0 // indirect
golang.org/x/crypto v0.12.0 // indirect
golang.org/x/mod v0.12.0 // indirect
golang.org/x/net v0.12.0 // indirect
golang.org/x/net v0.14.0 // indirect
golang.org/x/sync v0.3.0 // indirect
golang.org/x/sys v0.11.0 // indirect
golang.org/x/text v0.12.0 // indirect

@@ -434,8 +434,8 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
|
||||
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230812030736-25fe3ba69a28 h1:CECBTWhZ5NGAn8lGFB4ooRAYxZns8PXoX8kTR/14c04=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230812030736-25fe3ba69a28/go.mod h1:DiN3p2inoXOxGffxSswDKqWjQ7bU+Mp0c9v0XQXKmaA=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230829000527-f883dcdc21fc h1:eK3NOpjgm/b2TQ6rYqWx92Zri0kBuxf6gKjjsVxWKr8=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230829000527-f883dcdc21fc/go.mod h1:DiN3p2inoXOxGffxSswDKqWjQ7bU+Mp0c9v0XQXKmaA=
|
||||
github.com/scroll-tech/zktrie v0.6.0 h1:xLrMAO31Yo2BiPg1jtYKzcjpEFnXy8acbB7iIsyshPs=
|
||||
github.com/scroll-tech/zktrie v0.6.0/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
|
||||
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
|
||||
@@ -576,8 +576,8 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50=
|
||||
golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
|
||||
golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14=
|
||||
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
|
||||
common/libzkp/impl/Cargo.lock (generated, 1363 lines changed): file diff suppressed because it is too large
@@ -8,7 +8,7 @@ edition = "2021"
|
||||
crate-type = ["cdylib"]
|
||||
|
||||
[patch.crates-io]
|
||||
ethers-core = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v0.17.0" }
|
||||
ethers-core = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v2.0.7" }
|
||||
[patch."https://github.com/privacy-scaling-explorations/halo2.git"]
|
||||
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "develop" }
|
||||
[patch."https://github.com/privacy-scaling-explorations/poseidon.git"]
|
||||
@@ -20,18 +20,17 @@ maingate = { git = "https://github.com/scroll-tech/halo2wrong", branch = "halo2-
|
||||
halo2curves = { git = "https://github.com/scroll-tech/halo2curves.git", branch = "0.3.1-derive-serde" }
|
||||
|
||||
[dependencies]
|
||||
prover = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.5.16" }
|
||||
types = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.5.16" }
|
||||
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "develop" }
|
||||
prover = { git = "https://github.com/scroll-tech/zkevm-circuits.git", tag = "v0.9.1", default-features = false, features = ["parallel_syn", "scroll", "shanghai"] }
|
||||
|
||||
log = "0.4"
|
||||
base64 = "0.13.0"
|
||||
env_logger = "0.9.0"
|
||||
libc = "0.2"
|
||||
log = "0.4"
|
||||
once_cell = "1.8.0"
|
||||
serde = "1.0"
|
||||
serde_derive = "1.0"
|
||||
serde_json = "1.0.66"
|
||||
libc = "0.2"
|
||||
once_cell = "1.8.0"
|
||||
|
||||
|
||||
[profile.test]
|
||||
opt-level = 3
|
||||
|
||||
common/libzkp/impl/Makefile (new file, 8 lines)
@@ -0,0 +1,8 @@
|
||||
.PHONY: help fmt clippy test test-ci test-all
|
||||
|
||||
fmt:
|
||||
@cargo fmt --all -- --check
|
||||
|
||||
clippy:
|
||||
@cargo check --all-features
|
||||
@cargo clippy --release -- -D warnings
|
||||
@@ -1,23 +1,38 @@
|
||||
use crate::utils::{c_char_to_str, c_char_to_vec, vec_to_c_char, OUTPUT_DIR};
|
||||
use crate::{
|
||||
types::{CheckChunkProofsResponse, ProofResult},
|
||||
utils::{
|
||||
c_char_to_str, c_char_to_vec, file_exists, string_to_c_char, vec_to_c_char, OUTPUT_DIR,
|
||||
},
|
||||
};
|
||||
use libc::c_char;
|
||||
use prover::{
|
||||
aggregator::{Prover, Verifier},
|
||||
consts::AGG_VK_FILENAME,
|
||||
utils::{chunk_trace_to_witness_block, init_env_and_log},
|
||||
BatchProof, ChunkHash, ChunkProof,
|
||||
BatchProof, BlockTrace, ChunkHash, ChunkProof,
|
||||
};
|
||||
use std::{cell::OnceCell, panic, ptr::null};
|
||||
use types::eth::BlockTrace;
|
||||
use std::{cell::OnceCell, env, panic, ptr::null};
|
||||
|
||||
static mut PROVER: OnceCell<Prover> = OnceCell::new();
|
||||
static mut VERIFIER: OnceCell<Verifier> = OnceCell::new();
|
||||
|
||||
/// # Safety
|
||||
#[no_mangle]
|
||||
pub unsafe extern "C" fn init_batch_prover(params_dir: *const c_char) {
|
||||
pub unsafe extern "C" fn init_batch_prover(params_dir: *const c_char, assets_dir: *const c_char) {
|
||||
init_env_and_log("ffi_batch_prove");
|
||||
|
||||
let params_dir = c_char_to_str(params_dir);
|
||||
let prover = Prover::from_params_dir(params_dir);
|
||||
let assets_dir = c_char_to_str(assets_dir);
|
||||
|
||||
// TODO: add a settings in scroll-prover.
|
||||
env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir);
|
||||
|
||||
// The VK file must exist here; in the prover itself it is optional and only logged as a warning.
|
||||
if !file_exists(assets_dir, &AGG_VK_FILENAME) {
|
||||
panic!("{} must exist in folder {}", *AGG_VK_FILENAME, assets_dir);
|
||||
}
|
||||
|
||||
let prover = Prover::from_dirs(params_dir, assets_dir);
|
||||
|
||||
PROVER.set(prover).unwrap();
|
||||
}
|
||||
@@ -30,39 +45,104 @@ pub unsafe extern "C" fn init_batch_verifier(params_dir: *const c_char, assets_d
|
||||
let params_dir = c_char_to_str(params_dir);
|
||||
let assets_dir = c_char_to_str(assets_dir);
|
||||
|
||||
// TODO: add a settings in scroll-prover.
|
||||
env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir);
|
||||
let verifier = Verifier::from_dirs(params_dir, assets_dir);
|
||||
|
||||
VERIFIER.set(verifier).unwrap();
|
||||
}
|
||||
|
||||
/// # Safety
|
||||
#[no_mangle]
|
||||
pub unsafe extern "C" fn get_batch_vk() -> *const c_char {
|
||||
let vk_result = panic::catch_unwind(|| PROVER.get_mut().unwrap().get_vk());
|
||||
|
||||
vk_result
|
||||
.ok()
|
||||
.flatten()
|
||||
.map_or(null(), |vk| string_to_c_char(base64::encode(vk)))
|
||||
}
|
||||
|
||||
/// # Safety
|
||||
#[no_mangle]
|
||||
pub unsafe extern "C" fn check_chunk_proofs(chunk_proofs: *const c_char) -> *const c_char {
|
||||
let check_result: Result<bool, String> = panic::catch_unwind(|| {
|
||||
let chunk_proofs = c_char_to_vec(chunk_proofs);
|
||||
let chunk_proofs = serde_json::from_slice::<Vec<ChunkProof>>(&chunk_proofs)
|
||||
.map_err(|e| format!("failed to deserialize chunk proofs: {e:?}"))?;
|
||||
|
||||
if chunk_proofs.is_empty() {
|
||||
return Err("provided chunk proofs are empty.".to_string());
|
||||
}
|
||||
|
||||
let prover_ref = PROVER.get().expect("failed to get reference to PROVER.");
|
||||
|
||||
let valid = prover_ref.check_chunk_proofs(&chunk_proofs);
|
||||
Ok(valid)
|
||||
})
|
||||
.unwrap_or_else(|e| Err(format!("unwind error: {e:?}")));
|
||||
|
||||
let r = match check_result {
|
||||
Ok(valid) => CheckChunkProofsResponse {
|
||||
ok: valid,
|
||||
error: None,
|
||||
},
|
||||
Err(err) => CheckChunkProofsResponse {
|
||||
ok: false,
|
||||
error: Some(err),
|
||||
},
|
||||
};
|
||||
|
||||
serde_json::to_vec(&r).map_or(null(), vec_to_c_char)
|
||||
}
|
||||
|
||||
/// # Safety
|
||||
#[no_mangle]
|
||||
pub unsafe extern "C" fn gen_batch_proof(
|
||||
chunk_hashes: *const c_char,
|
||||
chunk_proofs: *const c_char,
|
||||
) -> *const c_char {
|
||||
let chunk_hashes = c_char_to_vec(chunk_hashes);
|
||||
let chunk_proofs = c_char_to_vec(chunk_proofs);
|
||||
let proof_result: Result<Vec<u8>, String> = panic::catch_unwind(|| {
|
||||
let chunk_hashes = c_char_to_vec(chunk_hashes);
|
||||
let chunk_proofs = c_char_to_vec(chunk_proofs);
|
||||
|
||||
let chunk_hashes = serde_json::from_slice::<Vec<ChunkHash>>(&chunk_hashes).unwrap();
|
||||
let chunk_proofs = serde_json::from_slice::<Vec<ChunkProof>>(&chunk_proofs).unwrap();
|
||||
assert_eq!(chunk_hashes.len(), chunk_proofs.len());
|
||||
let chunk_hashes = serde_json::from_slice::<Vec<ChunkHash>>(&chunk_hashes)
|
||||
.map_err(|e| format!("failed to deserialize chunk hashes: {e:?}"))?;
|
||||
let chunk_proofs = serde_json::from_slice::<Vec<ChunkProof>>(&chunk_proofs)
|
||||
.map_err(|e| format!("failed to deserialize chunk proofs: {e:?}"))?;
|
||||
|
||||
let chunk_hashes_proofs = chunk_hashes
|
||||
.into_iter()
|
||||
.zip(chunk_proofs.into_iter())
|
||||
.collect();
|
||||
if chunk_hashes.len() != chunk_proofs.len() {
|
||||
return Err(format!("chunk hashes and chunk proofs lengths mismatch: chunk_hashes.len() = {}, chunk_proofs.len() = {}",
|
||||
chunk_hashes.len(), chunk_proofs.len()));
|
||||
}
|
||||
|
||||
let chunk_hashes_proofs = chunk_hashes
|
||||
.into_iter()
|
||||
.zip(chunk_proofs.into_iter())
|
||||
.collect();
|
||||
|
||||
let proof_result = panic::catch_unwind(|| {
|
||||
let proof = PROVER
|
||||
.get_mut()
|
||||
.unwrap()
|
||||
.expect("failed to get mutable reference to PROVER.")
|
||||
.gen_agg_evm_proof(chunk_hashes_proofs, None, OUTPUT_DIR.as_deref())
|
||||
.unwrap();
|
||||
.map_err(|e| format!("failed to generate proof: {e:?}"))?;
|
||||
|
||||
serde_json::to_vec(&proof).unwrap()
|
||||
});
|
||||
proof_result.map_or(null(), vec_to_c_char)
|
||||
serde_json::to_vec(&proof).map_err(|e| format!("failed to serialize the proof: {e:?}"))
|
||||
})
|
||||
.unwrap_or_else(|e| Err(format!("unwind error: {e:?}")));
|
||||
|
||||
let r = match proof_result {
|
||||
Ok(proof_bytes) => ProofResult {
|
||||
message: Some(proof_bytes),
|
||||
error: None,
|
||||
},
|
||||
Err(err) => ProofResult {
|
||||
message: None,
|
||||
error: Some(err),
|
||||
},
|
||||
};
|
||||
|
||||
serde_json::to_vec(&r).map_or(null(), vec_to_c_char)
|
||||
}
|
||||
|
||||
/// # Safety
|
||||
|
||||
@@ -1,23 +1,38 @@
|
||||
use crate::utils::{c_char_to_str, c_char_to_vec, vec_to_c_char, OUTPUT_DIR};
|
||||
use crate::{
|
||||
types::ProofResult,
|
||||
utils::{
|
||||
c_char_to_str, c_char_to_vec, file_exists, string_to_c_char, vec_to_c_char, OUTPUT_DIR,
|
||||
},
|
||||
};
|
||||
use libc::c_char;
|
||||
use prover::{
|
||||
consts::CHUNK_VK_FILENAME,
|
||||
utils::init_env_and_log,
|
||||
zkevm::{Prover, Verifier},
|
||||
ChunkProof,
|
||||
BlockTrace, ChunkProof,
|
||||
};
|
||||
use std::{cell::OnceCell, panic, ptr::null};
|
||||
use types::eth::BlockTrace;
|
||||
use std::{cell::OnceCell, env, panic, ptr::null};
|
||||
|
||||
static mut PROVER: OnceCell<Prover> = OnceCell::new();
|
||||
static mut VERIFIER: OnceCell<Verifier> = OnceCell::new();
|
||||
|
||||
/// # Safety
|
||||
#[no_mangle]
|
||||
pub unsafe extern "C" fn init_chunk_prover(params_dir: *const c_char) {
|
||||
pub unsafe extern "C" fn init_chunk_prover(params_dir: *const c_char, assets_dir: *const c_char) {
|
||||
init_env_and_log("ffi_chunk_prove");
|
||||
|
||||
let params_dir = c_char_to_str(params_dir);
|
||||
let prover = Prover::from_params_dir(params_dir);
|
||||
let assets_dir = c_char_to_str(assets_dir);
|
||||
|
||||
// TODO: add a settings in scroll-prover.
|
||||
env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir);
|
||||
|
||||
// The VK file must exist here; in the prover itself it is optional and only logged as a warning.
|
||||
if !file_exists(assets_dir, &CHUNK_VK_FILENAME) {
|
||||
panic!("{} must exist in folder {}", *CHUNK_VK_FILENAME, assets_dir);
|
||||
}
|
||||
|
||||
let prover = Prover::from_dirs(params_dir, assets_dir);
|
||||
|
||||
PROVER.set(prover).unwrap();
|
||||
}
|
||||
@@ -30,6 +45,8 @@ pub unsafe extern "C" fn init_chunk_verifier(params_dir: *const c_char, assets_d
|
||||
let params_dir = c_char_to_str(params_dir);
|
||||
let assets_dir = c_char_to_str(assets_dir);
|
||||
|
||||
// TODO: add a settings in scroll-prover.
|
||||
env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir);
|
||||
let verifier = Verifier::from_dirs(params_dir, assets_dir);
|
||||
|
||||
VERIFIER.set(verifier).unwrap();
|
||||
@@ -37,21 +54,45 @@ pub unsafe extern "C" fn init_chunk_verifier(params_dir: *const c_char, assets_d
|
||||
|
||||
/// # Safety
|
||||
#[no_mangle]
|
||||
pub unsafe extern "C" fn gen_chunk_proof(block_traces: *const c_char) -> *const c_char {
|
||||
let block_traces = c_char_to_vec(block_traces);
|
||||
let block_traces = serde_json::from_slice::<Vec<BlockTrace>>(&block_traces).unwrap();
|
||||
pub unsafe extern "C" fn get_chunk_vk() -> *const c_char {
|
||||
let vk_result = panic::catch_unwind(|| PROVER.get_mut().unwrap().get_vk());
|
||||
|
||||
vk_result
|
||||
.ok()
|
||||
.flatten()
|
||||
.map_or(null(), |vk| string_to_c_char(base64::encode(vk)))
|
||||
}
|
||||
|
||||
/// # Safety
|
||||
#[no_mangle]
|
||||
pub unsafe extern "C" fn gen_chunk_proof(block_traces: *const c_char) -> *const c_char {
|
||||
let proof_result: Result<Vec<u8>, String> = panic::catch_unwind(|| {
|
||||
let block_traces = c_char_to_vec(block_traces);
|
||||
let block_traces = serde_json::from_slice::<Vec<BlockTrace>>(&block_traces)
|
||||
.map_err(|e| format!("failed to deserialize block traces: {e:?}"))?;
|
||||
|
||||
let proof_result = panic::catch_unwind(|| {
|
||||
let proof = PROVER
|
||||
.get_mut()
|
||||
.unwrap()
|
||||
.gen_chunk_proof(block_traces, None, OUTPUT_DIR.as_deref())
|
||||
.unwrap();
|
||||
.expect("failed to get mutable reference to PROVER.")
|
||||
.gen_chunk_proof(block_traces, None, None, OUTPUT_DIR.as_deref())
|
||||
.map_err(|e| format!("failed to generate proof: {e:?}"))?;
|
||||
|
||||
serde_json::to_vec(&proof).unwrap()
|
||||
});
|
||||
serde_json::to_vec(&proof).map_err(|e| format!("failed to serialize the proof: {e:?}"))
|
||||
})
|
||||
.unwrap_or_else(|e| Err(format!("unwind error: {e:?}")));
|
||||
|
||||
proof_result.map_or(null(), vec_to_c_char)
|
||||
let r = match proof_result {
|
||||
Ok(proof_bytes) => ProofResult {
|
||||
message: Some(proof_bytes),
|
||||
error: None,
|
||||
},
|
||||
Err(err) => ProofResult {
|
||||
message: None,
|
||||
error: Some(err),
|
||||
},
|
||||
};
|
||||
|
||||
serde_json::to_vec(&r).map_or(null(), vec_to_c_char)
|
||||
}
|
||||
|
||||
/// # Safety
|
||||
|
||||
@@ -2,4 +2,5 @@

mod batch;
mod chunk;
mod types;
mod utils;

common/libzkp/impl/src/types.rs (new file, 22 lines)
@@ -0,0 +1,22 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
// Represents the result of a chunk proof checking operation.
|
||||
// `ok` indicates whether the proof checking was successful.
|
||||
// `error` provides additional details in case the check failed.
|
||||
#[derive(Debug, Clone, Deserialize, Serialize)]
|
||||
pub struct CheckChunkProofsResponse {
|
||||
pub ok: bool,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub error: Option<String>,
|
||||
}
|
||||
|
||||
// Encapsulates the result from generating a proof.
|
||||
// `message` holds the generated proof in byte slice format.
|
||||
// `error` provides additional details in case the proof generation failed.
|
||||
#[derive(Debug, Clone, Deserialize, Serialize)]
|
||||
pub struct ProofResult {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub message: Option<Vec<u8>>,
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub error: Option<String>,
|
||||
}
|
||||
@@ -3,6 +3,7 @@ use std::{
|
||||
env,
|
||||
ffi::{CStr, CString},
|
||||
os::raw::c_char,
|
||||
path::PathBuf,
|
||||
};
|
||||
|
||||
// Only used for debugging.
|
||||
@@ -19,6 +20,17 @@ pub(crate) fn c_char_to_vec(c: *const c_char) -> Vec<u8> {
|
||||
cstr.to_bytes().to_vec()
|
||||
}
|
||||
|
||||
pub(crate) fn string_to_c_char(string: String) -> *const c_char {
|
||||
CString::new(string).unwrap().into_raw()
|
||||
}
|
||||
|
||||
pub(crate) fn vec_to_c_char(bytes: Vec<u8>) -> *const c_char {
|
||||
CString::new(bytes).unwrap().into_raw()
|
||||
}
|
||||
|
||||
pub(crate) fn file_exists(dir: &str, filename: &str) -> bool {
|
||||
let mut path = PathBuf::from(dir);
|
||||
path.push(filename);
|
||||
|
||||
path.exists()
|
||||
}
|
||||
|
||||
@@ -1,10 +1,13 @@
void init_batch_prover(char* params_dir);
void init_batch_prover(char* params_dir, char* assets_dir);
void init_batch_verifier(char* params_dir, char* assets_dir);
char* get_batch_vk();
char* check_chunk_proofs(char* chunk_proofs);
char* gen_batch_proof(char* chunk_hashes, char* chunk_proofs);
char verify_batch_proof(char* proof);

void init_chunk_prover(char* params_dir);
void init_chunk_prover(char* params_dir, char* assets_dir);
void init_chunk_verifier(char* params_dir, char* assets_dir);
char* get_chunk_vk();
char* gen_chunk_proof(char* block_traces);
char verify_chunk_proof(char* proof);


@@ -1,57 +0,0 @@
|
||||
package metrics
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net"
|
||||
"net/http"
|
||||
"strconv"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/scroll-tech/go-ethereum/metrics"
|
||||
"github.com/scroll-tech/go-ethereum/metrics/prometheus"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
"github.com/urfave/cli/v2"
|
||||
|
||||
"scroll-tech/common/utils"
|
||||
)
|
||||
|
||||
var (
|
||||
// ScrollRegistry is used for scroll metrics.
|
||||
ScrollRegistry = metrics.NewRegistry()
|
||||
)
|
||||
|
||||
// Serve starts the metrics server on the given address, will be closed when the given
|
||||
// context is canceled.
|
||||
func Serve(ctx context.Context, c *cli.Context) {
|
||||
if !c.Bool(utils.MetricsEnabled.Name) {
|
||||
return
|
||||
}
|
||||
|
||||
address := net.JoinHostPort(
|
||||
c.String(utils.MetricsAddr.Name),
|
||||
strconv.Itoa(c.Int(utils.MetricsPort.Name)),
|
||||
)
|
||||
|
||||
server := &http.Server{
|
||||
Addr: address,
|
||||
Handler: prometheus.Handler(ScrollRegistry),
|
||||
ReadTimeout: rpc.DefaultHTTPTimeouts.ReadTimeout,
|
||||
WriteTimeout: rpc.DefaultHTTPTimeouts.WriteTimeout,
|
||||
IdleTimeout: rpc.DefaultHTTPTimeouts.IdleTimeout,
|
||||
}
|
||||
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
if err := server.Close(); err != nil {
|
||||
log.Error("Failed to close metrics server", "error", err)
|
||||
}
|
||||
}()
|
||||
|
||||
log.Info("Starting metrics server", "address", address)
|
||||
|
||||
go func() {
|
||||
if err := server.ListenAndServe(); err != nil {
|
||||
log.Error("start metrics server error", "error", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
@@ -1,10 +1,10 @@
package metrics
package observability

import (
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"

"scroll-tech/common/metrics/ginmetrics"
"scroll-tech/common/observability/ginmetrics"
)

// Use registers the gin metric
common/observability/probes.go (new file, 35 lines)
@@ -0,0 +1,35 @@
|
||||
package observability
|
||||
|
||||
import (
|
||||
"github.com/gin-gonic/gin"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/database"
|
||||
"scroll-tech/common/types"
|
||||
)
|
||||
|
||||
// ProbesController probe check controller
|
||||
type ProbesController struct {
|
||||
db *gorm.DB
|
||||
}
|
||||
|
||||
// NewProbesController returns a ProbesController instance
|
||||
func NewProbesController(db *gorm.DB) *ProbesController {
|
||||
return &ProbesController{
|
||||
db: db,
|
||||
}
|
||||
}
|
||||
|
||||
// HealthCheck is the API handler for the health check
|
||||
func (a *ProbesController) HealthCheck(c *gin.Context) {
|
||||
if _, err := database.Ping(a.db); err != nil {
|
||||
types.RenderFatal(c, err)
|
||||
return
|
||||
}
|
||||
types.RenderSuccess(c, nil)
|
||||
}
|
||||
|
||||
// Ready is the API handler for the readiness check
|
||||
func (a *ProbesController) Ready(c *gin.Context) {
|
||||
types.RenderSuccess(c, nil)
|
||||
}
|
||||
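A minimal sketch of exercising the readiness probe with gin's test utilities; the test name, the import path and the use of a nil `*gorm.DB` are assumptions (unlike HealthCheck, Ready never queries the database).

```go
package observability_test

import (
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/gin-gonic/gin"

	"scroll-tech/common/observability"
)

func TestReadyProbe(t *testing.T) {
	router := gin.New()
	ctrl := observability.NewProbesController(nil) // nil DB: Ready does not touch it
	router.GET("/ready", ctrl.Ready)

	req := httptest.NewRequest(http.MethodGet, "/ready", nil)
	rec := httptest.NewRecorder()
	router.ServeHTTP(rec, req)

	if rec.Code != http.StatusOK {
		t.Fatalf("expected 200 from /ready, got %d", rec.Code)
	}
}
```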
@@ -1,4 +1,4 @@
|
||||
package metrics
|
||||
package observability
|
||||
|
||||
import (
|
||||
"errors"
|
||||
@@ -11,17 +11,17 @@ import (
|
||||
|
||||
"github.com/gin-contrib/pprof"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promhttp"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"github.com/urfave/cli/v2"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/utils"
|
||||
)
|
||||
|
||||
// Server starts the metrics server on the given address; it will be closed when the given
|
||||
// context is canceled.
|
||||
func Server(c *cli.Context, reg *prometheus.Registry) {
|
||||
func Server(c *cli.Context, db *gorm.DB) {
|
||||
if !c.Bool(utils.MetricsEnabled.Name) {
|
||||
return
|
||||
}
|
||||
@@ -33,6 +33,10 @@ func Server(c *cli.Context, reg *prometheus.Registry) {
|
||||
promhttp.Handler().ServeHTTP(context.Writer, context.Request)
|
||||
})
|
||||
|
||||
probeController := NewProbesController(db)
|
||||
r.GET("/health", probeController.HealthCheck)
|
||||
r.GET("/ready", probeController.Ready)
|
||||
|
||||
address := fmt.Sprintf(":%s", c.String(utils.MetricsPort.Name))
|
||||
server := &http.Server{
|
||||
Addr: address,
|
||||
@@ -10,6 +10,16 @@ import (
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
)
|
||||
|
||||
// BatchMeta contains metadata of a batch.
|
||||
type BatchMeta struct {
|
||||
StartChunkIndex uint64
|
||||
StartChunkHash string
|
||||
EndChunkIndex uint64
|
||||
EndChunkHash string
|
||||
TotalL1CommitGas uint64
|
||||
TotalL1CommitCalldataSize uint32
|
||||
}
|
||||
|
||||
// BatchHeader contains batch header info to be committed.
|
||||
type BatchHeader struct {
|
||||
// Encoded in BatchHeaderV0Codec
|
||||
|
||||
@@ -3,20 +3,31 @@ package types
|
||||
import (
|
||||
"encoding/binary"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/common/hexutil"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
)
|
||||
|
||||
// CalldataNonZeroByteGas is the gas consumption per non-zero byte in calldata.
|
||||
const CalldataNonZeroByteGas = 16
|
||||
|
||||
// GetKeccak256Gas calculates keccak256 hash gas.
|
||||
func GetKeccak256Gas(size uint64) uint64 {
|
||||
return 30 + 6*((size+31)/32) // 30 + 6 * ceil(size / 32)
|
||||
}
|
||||
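As a quick sanity check of the formula (a standalone sketch, not part of this diff): hashing one 32-byte word costs 30 + 6 = 36 gas, and a 100-byte payload rounds up to 4 words, i.e. 30 + 24 = 54 gas.

```go
package main

import "fmt"

// getKeccak256Gas mirrors GetKeccak256Gas above: 30 + 6*ceil(size/32).
func getKeccak256Gas(size uint64) uint64 {
	return 30 + 6*((size+31)/32)
}

func main() {
	fmt.Println(getKeccak256Gas(32))  // 36: exactly one word
	fmt.Println(getKeccak256Gas(100)) // 54: ceil(100/32) = 4 words
}
```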
|
||||
// WrappedBlock contains the block's Header, Transactions and WithdrawTrieRoot hash.
|
||||
type WrappedBlock struct {
|
||||
Header *types.Header `json:"header"`
|
||||
// Transactions is only used for recovering types.Transactions; the from field of types.TransactionData is missing.
|
||||
Transactions []*types.TransactionData `json:"transactions"`
|
||||
WithdrawRoot common.Hash `json:"withdraw_trie_root,omitempty"`
|
||||
RowConsumption *types.RowConsumption `json:"row_consumption"`
|
||||
Transactions []*types.TransactionData `json:"transactions"`
|
||||
WithdrawRoot common.Hash `json:"withdraw_trie_root,omitempty"`
|
||||
RowConsumption *types.RowConsumption `json:"row_consumption"`
|
||||
txPayloadLengthCache map[string]uint64
|
||||
}
|
||||
|
||||
// NumL1Messages returns the number of L1 messages in this block.
|
||||
@@ -87,17 +98,15 @@ func (w *WrappedBlock) EstimateL1CommitCalldataSize() uint64 {
|
||||
if txData.Type == types.L1MessageTxType {
|
||||
continue
|
||||
}
|
||||
size += uint64(len(txData.Data))
|
||||
size += 4 // 4 bytes payload length
|
||||
size += w.getTxPayloadLength(txData)
|
||||
}
|
||||
size += 60 // 60 bytes BlockContext
|
||||
return size
|
||||
}
|
||||
|
||||
// EstimateL1CommitGas calculates the total L1 commit gas for this block approximately.
|
||||
func (w *WrappedBlock) EstimateL1CommitGas() uint64 {
|
||||
getKeccakGas := func(size uint64) uint64 {
|
||||
return 30 + 6*((size+31)/32) // 30 + 6 * ceil(size / 32)
|
||||
}
|
||||
|
||||
var total uint64
|
||||
var numL1Messages uint64
|
||||
for _, txData := range w.Transactions {
|
||||
@@ -106,25 +115,15 @@ func (w *WrappedBlock) EstimateL1CommitGas() uint64 {
|
||||
continue
|
||||
}
|
||||
|
||||
data, _ := hexutil.Decode(txData.Data)
|
||||
tx := types.NewTx(&types.LegacyTx{
|
||||
Nonce: txData.Nonce,
|
||||
To: txData.To,
|
||||
Value: txData.Value.ToInt(),
|
||||
Gas: txData.Gas,
|
||||
GasPrice: txData.GasPrice.ToInt(),
|
||||
Data: data,
|
||||
V: txData.V.ToInt(),
|
||||
R: txData.R.ToInt(),
|
||||
S: txData.S.ToInt(),
|
||||
})
|
||||
rlpTxData, _ := tx.MarshalBinary()
|
||||
txPayloadLength := uint64(len(rlpTxData))
|
||||
total += 16 * txPayloadLength // an over-estimate: treat each byte as non-zero
|
||||
total += 16 * 4 // size of a uint32 field
|
||||
total += getKeccakGas(txPayloadLength) // l2 tx hash
|
||||
txPayloadLength := w.getTxPayloadLength(txData)
|
||||
total += CalldataNonZeroByteGas * txPayloadLength // an over-estimate: treat each byte as non-zero
|
||||
total += CalldataNonZeroByteGas * 4 // 4 bytes payload length
|
||||
total += GetKeccak256Gas(txPayloadLength) // l2 tx hash
|
||||
}
|
||||
|
||||
// 60 bytes BlockContext calldata
|
||||
total += CalldataNonZeroByteGas * 60
|
||||
|
||||
// sload
|
||||
total += 2100 * numL1Messages // numL1Messages times cold sload in L1MessageQueue
|
||||
|
||||
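These per-block estimates are what a chunk proposer can use to keep a chunk under an L1 commit gas budget. The sketch below is not the actual proposer logic; the package, function name and gas cap are illustrative only.

```go
package proposer // hypothetical helper package

import "scroll-tech/common/types"

// proposeChunk greedily packs blocks until the estimated L1 commit gas
// would exceed maxGas, always keeping at least one block per chunk.
func proposeChunk(blocks []*types.WrappedBlock, maxGas uint64) []*types.WrappedBlock {
	var gasUsed uint64
	var chunk []*types.WrappedBlock
	for _, b := range blocks {
		g := b.EstimateL1CommitGas()
		if len(chunk) > 0 && gasUsed+g > maxGas {
			break
		}
		gasUsed += g
		chunk = append(chunk, b)
	}
	return chunk
}
```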
@@ -135,13 +134,47 @@ func (w *WrappedBlock) EstimateL1CommitGas() uint64 {
|
||||
return total
|
||||
}
|
||||
|
||||
// L2TxsNum calculates the number of l2 txs.
|
||||
func (w *WrappedBlock) L2TxsNum() uint64 {
|
||||
var count uint64
|
||||
for _, txData := range w.Transactions {
|
||||
if txData.Type != types.L1MessageTxType {
|
||||
count++
|
||||
}
|
||||
func (w *WrappedBlock) getTxPayloadLength(txData *types.TransactionData) uint64 {
|
||||
if w.txPayloadLengthCache == nil {
|
||||
w.txPayloadLengthCache = make(map[string]uint64)
|
||||
}
|
||||
return count
|
||||
|
||||
if length, exists := w.txPayloadLengthCache[txData.TxHash]; exists {
|
||||
return length
|
||||
}
|
||||
|
||||
rlpTxData, err := convertTxDataToRLPEncoding(txData)
|
||||
if err != nil {
|
||||
log.Crit("convertTxDataToRLPEncoding failed, which should not happen", "hash", txData.TxHash, "err", err)
|
||||
return 0
|
||||
}
|
||||
txPayloadLength := uint64(len(rlpTxData))
|
||||
w.txPayloadLengthCache[txData.TxHash] = txPayloadLength
|
||||
return txPayloadLength
|
||||
}
|
||||
|
||||
func convertTxDataToRLPEncoding(txData *types.TransactionData) ([]byte, error) {
|
||||
data, err := hexutil.Decode(txData.Data)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to decode txData.Data: %s, err: %w", txData.Data, err)
|
||||
}
|
||||
|
||||
tx := types.NewTx(&types.LegacyTx{
|
||||
Nonce: txData.Nonce,
|
||||
To: txData.To,
|
||||
Value: txData.Value.ToInt(),
|
||||
Gas: txData.Gas,
|
||||
GasPrice: txData.GasPrice.ToInt(),
|
||||
Data: data,
|
||||
V: txData.V.ToInt(),
|
||||
R: txData.R.ToInt(),
|
||||
S: txData.S.ToInt(),
|
||||
})
|
||||
|
||||
rlpTxData, err := tx.MarshalBinary()
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to marshal binary of the tx: %+v, err: %w", tx, err)
|
||||
}
|
||||
|
||||
return rlpTxData, nil
|
||||
}
|
||||
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/common/hexutil"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
)
|
||||
@@ -65,23 +64,7 @@ func (c *Chunk) Encode(totalL1MessagePoppedBefore uint64) ([]byte, error) {
|
||||
if txData.Type == types.L1MessageTxType {
|
||||
continue
|
||||
}
|
||||
data, err := hexutil.Decode(txData.Data)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
// right now we only support legacy tx
|
||||
tx := types.NewTx(&types.LegacyTx{
|
||||
Nonce: txData.Nonce,
|
||||
To: txData.To,
|
||||
Value: txData.Value.ToInt(),
|
||||
Gas: txData.Gas,
|
||||
GasPrice: txData.GasPrice.ToInt(),
|
||||
Data: data,
|
||||
V: txData.V.ToInt(),
|
||||
R: txData.R.ToInt(),
|
||||
S: txData.S.ToInt(),
|
||||
})
|
||||
rlpTxData, err := tx.MarshalBinary()
|
||||
rlpTxData, err := convertTxDataToRLPEncoding(txData)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -146,14 +129,10 @@ func (c *Chunk) EstimateL1CommitGas() uint64 {
|
||||
}
|
||||
|
||||
numBlocks := uint64(len(c.Blocks))
|
||||
totalL1CommitGas += 100 * numBlocks // numBlocks times warm sload
|
||||
totalL1CommitGas += 16 // numBlocks field of chunk encoding in calldata
|
||||
totalL1CommitGas += 16 * 60 * numBlocks // BlockContext in chunk
|
||||
totalL1CommitGas += 100 * numBlocks // numBlocks times warm sload
|
||||
totalL1CommitGas += CalldataNonZeroByteGas // numBlocks field of chunk encoding in calldata
|
||||
totalL1CommitGas += CalldataNonZeroByteGas * numBlocks * 60 // numBlocks of BlockContext in chunk
|
||||
|
||||
getKeccakGas := func(size uint64) uint64 {
|
||||
return 30 + 6*((size+31)/32) // 30 + 6 * ceil(size / 32)
|
||||
}
|
||||
|
||||
totalL1CommitGas += getKeccakGas(58*numBlocks + 32*totalTxNum) // chunk hash
|
||||
totalL1CommitGas += GetKeccak256Gas(58*numBlocks + 32*totalTxNum) // chunk hash
|
||||
return totalL1CommitGas
|
||||
}
|
||||
|
||||
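To make the chunk-hash term concrete: a chunk with one block and two L2 transactions hashes 58*1 + 32*2 = 122 bytes, so GetKeccak256Gas(122) = 30 + 6*ceil(122/32) = 30 + 24 = 54 gas; the remaining gas in the test totals below comes from the per-transaction calldata, sload and BlockContext terms.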
@@ -38,11 +38,15 @@ func TestChunkEncode(t *testing.T) {
|
||||
wrappedBlock := &WrappedBlock{}
|
||||
assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
|
||||
assert.Equal(t, uint64(0), wrappedBlock.NumL1Messages(0))
|
||||
assert.Equal(t, uint64(298), wrappedBlock.EstimateL1CommitCalldataSize())
|
||||
assert.Equal(t, uint64(2), wrappedBlock.NumL2Transactions())
|
||||
chunk = &Chunk{
|
||||
Blocks: []*WrappedBlock{
|
||||
wrappedBlock,
|
||||
},
|
||||
}
|
||||
assert.Equal(t, uint64(0), chunk.NumL1Messages(0))
|
||||
assert.Equal(t, uint64(6006), chunk.EstimateL1CommitGas())
|
||||
bytes, err = chunk.Encode(0)
|
||||
hexString := hex.EncodeToString(bytes)
|
||||
assert.NoError(t, err)
|
||||
@@ -56,11 +60,15 @@ func TestChunkEncode(t *testing.T) {
|
||||
wrappedBlock2 := &WrappedBlock{}
|
||||
assert.NoError(t, json.Unmarshal(templateBlockTrace2, wrappedBlock2))
|
||||
assert.Equal(t, uint64(11), wrappedBlock2.NumL1Messages(0)) // 0..=9 skipped, 10 included
|
||||
assert.Equal(t, uint64(96), wrappedBlock2.EstimateL1CommitCalldataSize())
|
||||
assert.Equal(t, uint64(1), wrappedBlock2.NumL2Transactions())
|
||||
chunk = &Chunk{
|
||||
Blocks: []*WrappedBlock{
|
||||
wrappedBlock2,
|
||||
},
|
||||
}
|
||||
assert.Equal(t, uint64(11), chunk.NumL1Messages(0))
|
||||
assert.Equal(t, uint64(5002), chunk.EstimateL1CommitGas())
|
||||
bytes, err = chunk.Encode(0)
|
||||
hexString = hex.EncodeToString(bytes)
|
||||
assert.NoError(t, err)
|
||||
@@ -75,6 +83,8 @@ func TestChunkEncode(t *testing.T) {
|
||||
wrappedBlock2,
|
||||
},
|
||||
}
|
||||
assert.Equal(t, uint64(11), chunk.NumL1Messages(0))
|
||||
assert.Equal(t, uint64(9958), chunk.EstimateL1CommitGas())
|
||||
bytes, err = chunk.Encode(0)
|
||||
hexString = hex.EncodeToString(bytes)
|
||||
assert.NoError(t, err)
|
||||
@@ -136,3 +146,81 @@ func TestChunkHash(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, "0x2eb7dd63bf8fc29a0f8c10d16c2ae6f9da446907c79d50f5c164d30dc8526b60", hash.Hex())
|
||||
}
|
||||
|
||||
func TestErrorPaths(t *testing.T) {
|
||||
// test 1: Header.Number is not a uint64
|
||||
templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
|
||||
assert.NoError(t, err)
|
||||
|
||||
wrappedBlock := &WrappedBlock{}
|
||||
|
||||
assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
|
||||
wrappedBlock.Header.Number = wrappedBlock.Header.Number.Lsh(wrappedBlock.Header.Number, 64)
|
||||
bytes, err := wrappedBlock.Encode(0)
|
||||
assert.Nil(t, bytes)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "block number is not uint64")
|
||||
|
||||
assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
|
||||
for i := 0; i < 65537; i++ {
|
||||
wrappedBlock.Transactions = append(wrappedBlock.Transactions, wrappedBlock.Transactions[0])
|
||||
}
|
||||
|
||||
bytes, err = wrappedBlock.Encode(0)
|
||||
assert.Nil(t, bytes)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "number of transactions exceeds max uint16")
|
||||
|
||||
chunk := &Chunk{
|
||||
Blocks: []*WrappedBlock{
|
||||
wrappedBlock,
|
||||
},
|
||||
}
|
||||
|
||||
bytes, err = chunk.Encode(0)
|
||||
assert.Nil(t, bytes)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "number of transactions exceeds max uint16")
|
||||
|
||||
wrappedBlock.Transactions = wrappedBlock.Transactions[:1]
|
||||
wrappedBlock.Transactions[0].Data = "not-a-hex"
|
||||
bytes, err = chunk.Encode(0)
|
||||
assert.Nil(t, bytes)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "hex string without 0x prefix")
|
||||
|
||||
assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
|
||||
wrappedBlock.Transactions[0].TxHash = "not-a-hex"
|
||||
_, err = chunk.Hash(0)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "invalid byte")
|
||||
|
||||
templateBlockTrace2, err := os.ReadFile("../testdata/blockTrace_04.json")
|
||||
assert.NoError(t, err)
|
||||
|
||||
wrappedBlock2 := &WrappedBlock{}
|
||||
assert.NoError(t, json.Unmarshal(templateBlockTrace2, wrappedBlock2))
|
||||
for i := 0; i < 65535; i++ {
|
||||
tx := &wrappedBlock2.Transactions[i]
|
||||
txCopy := *tx
|
||||
txCopy.Nonce = uint64(i + 1)
|
||||
wrappedBlock2.Transactions = append(wrappedBlock2.Transactions, txCopy)
|
||||
}
|
||||
|
||||
bytes, err = wrappedBlock2.Encode(0)
|
||||
assert.Nil(t, bytes)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "number of L1 messages exceeds max uint16")
|
||||
|
||||
chunk = &Chunk{
|
||||
Blocks: []*WrappedBlock{
|
||||
wrappedBlock2,
|
||||
},
|
||||
}
|
||||
|
||||
bytes, err = chunk.Encode(0)
|
||||
assert.Nil(t, bytes)
|
||||
assert.Error(t, err)
|
||||
assert.Contains(t, err.Error(), "number of L1 messages exceeds max uint16")
|
||||
|
||||
}
|
||||
|
||||
@@ -103,6 +103,12 @@ const (
|
||||
ProverTaskFailureTypeUndefined ProverTaskFailureType = iota
|
||||
// ProverTaskFailureTypeTimeout prover task failure of timeout
|
||||
ProverTaskFailureTypeTimeout
|
||||
// ProverTaskFailureTypeSubmitStatusNotOk prover task failure of submit status not ok
|
||||
ProverTaskFailureTypeSubmitStatusNotOk
|
||||
// ProverTaskFailureTypeVerifiedFailed prover task failure of verified failed by coordinator
|
||||
ProverTaskFailureTypeVerifiedFailed
|
||||
// ProverTaskFailureTypeServerError prover task failure of server error
|
||||
ProverTaskFailureTypeServerError
|
||||
)
|
||||
|
||||
func (r ProverTaskFailureType) String() string {
|
||||
@@ -111,8 +117,14 @@ func (r ProverTaskFailureType) String() string {
|
||||
return "prover task failure undefined"
|
||||
case ProverTaskFailureTypeTimeout:
|
||||
return "prover task failure timeout"
|
||||
case ProverTaskFailureTypeSubmitStatusNotOk:
|
||||
return "prover task failure validated submit proof status not ok"
|
||||
case ProverTaskFailureTypeVerifiedFailed:
|
||||
return "prover task failure verified failed"
|
||||
case ProverTaskFailureTypeServerError:
|
||||
return "prover task failure server exception"
|
||||
default:
|
||||
return "illegal prover task failure type"
|
||||
return fmt.Sprintf("illegal prover task failure type (%d)", int32(r))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -126,8 +138,8 @@ const (
|
||||
ProvingTaskUnassigned
|
||||
// ProvingTaskAssigned : proving_task is assigned to be proved
|
||||
ProvingTaskAssigned
|
||||
// ProvingTaskProved DEPRECATED: proof has been returned by prover
|
||||
ProvingTaskProved
|
||||
// ProvingTaskProvedDEPRECATED DEPRECATED: proof has been returned by prover
|
||||
ProvingTaskProvedDEPRECATED
|
||||
// ProvingTaskVerified : proof is valid
|
||||
ProvingTaskVerified
|
||||
// ProvingTaskFailed : fail to generate proof
|
||||
@@ -140,7 +152,7 @@ func (ps ProvingStatus) String() string {
|
||||
return "unassigned"
|
||||
case ProvingTaskAssigned:
|
||||
return "assigned"
|
||||
case ProvingTaskProved:
|
||||
case ProvingTaskProvedDEPRECATED:
|
||||
return "proved"
|
||||
case ProvingTaskVerified:
|
||||
return "verified"
|
||||
|
||||
@@ -58,8 +58,8 @@ func TestProvingStatus(t *testing.T) {
|
||||
"assigned",
|
||||
},
|
||||
{
|
||||
"ProvingTaskProved",
|
||||
ProvingTaskProved,
|
||||
"ProvingTaskProvedDEPRECATED",
|
||||
ProvingTaskProvedDEPRECATED,
|
||||
"proved",
|
||||
},
|
||||
{
|
||||
|
||||
@@ -3,12 +3,21 @@ package types
|
||||
const (
|
||||
// Success shows OK.
|
||||
Success = 0
|
||||
// InternalServerError shows a fatal error in the server
|
||||
InternalServerError = 500
|
||||
|
||||
// ErrJWTCommonErr jwt common error
|
||||
ErrJWTCommonErr = 50000
|
||||
// ErrJWTTokenExpired jwt token expired
|
||||
ErrJWTTokenExpired = 50001
|
||||
|
||||
// ErrProverStatsAPIParameterInvalidNo is invalid params
|
||||
ErrProverStatsAPIParameterInvalidNo = 10001
|
||||
// ErrProverStatsAPIProverTaskFailure is getting prover task error
|
||||
ErrProverStatsAPIProverTaskFailure = 10002
|
||||
// ErrProverStatsAPIProverTotalRewardFailure is getting total rewards error
|
||||
ErrProverStatsAPIProverTotalRewardFailure = 10003
|
||||
|
||||
// ErrCoordinatorParameterInvalidNo is invalid params
|
||||
ErrCoordinatorParameterInvalidNo = 20001
|
||||
// ErrCoordinatorGetTaskFailure is getting prover task error
|
||||
|
||||
@@ -13,6 +13,18 @@ import (
|
||||
"github.com/scroll-tech/go-ethereum/rlp"
|
||||
)
|
||||
|
||||
// ProofFailureType the proof failure type
|
||||
type ProofFailureType int
|
||||
|
||||
const (
|
||||
// ProofFailureUndefined the undefined type proof failure type
|
||||
ProofFailureUndefined ProofFailureType = iota
|
||||
// ProofFailurePanic proof failure for prover panic
|
||||
ProofFailurePanic
|
||||
// ProofFailureNoPanic proof failure for no prover panic
|
||||
ProofFailureNoPanic
|
||||
)
|
||||
|
||||
// RespStatus represents status code from prover to scroll
|
||||
type RespStatus uint32
|
||||
|
||||
@@ -199,6 +211,7 @@ func (a *ProofMsg) PublicKey() (string, error) {
|
||||
|
||||
// TaskMsg is a wrapper type around db ProveTask type.
|
||||
type TaskMsg struct {
|
||||
UUID string `json:"uuid"`
|
||||
ID string `json:"id"`
|
||||
Type ProofType `json:"type,omitempty"`
|
||||
BatchTaskDetail *BatchTaskDetail `json:"batch_task_detail,omitempty"`
|
||||
@@ -250,12 +263,14 @@ type ChunkInfo struct {
|
||||
|
||||
// ChunkProof includes the proof info that are required for chunk verification and rollup.
|
||||
type ChunkProof struct {
|
||||
StorageTrace []byte `json:"storage_trace"`
|
||||
Protocol []byte `json:"protocol"`
|
||||
Proof []byte `json:"proof"`
|
||||
Instances []byte `json:"instances"`
|
||||
Vk []byte `json:"vk"`
|
||||
ChunkInfo *ChunkInfo `json:"chunk_info,omitempty"` // cross-reference between cooridinator computation and prover compution
|
||||
StorageTrace []byte `json:"storage_trace"`
|
||||
Protocol []byte `json:"protocol"`
|
||||
Proof []byte `json:"proof"`
|
||||
Instances []byte `json:"instances"`
|
||||
Vk []byte `json:"vk"`
|
||||
// cross-reference between coordinator computation and prover computation
|
||||
ChunkInfo *ChunkInfo `json:"chunk_info,omitempty"`
|
||||
GitVersion string `json:"git_version,omitempty"`
|
||||
}
|
||||
|
||||
// BatchProof includes the proof info that are required for batch verification and rollup.
|
||||
@@ -263,6 +278,8 @@ type BatchProof struct {
|
||||
Proof []byte `json:"proof"`
|
||||
Instances []byte `json:"instances"`
|
||||
Vk []byte `json:"vk"`
|
||||
// cross-reference between coordinator computation and prover computation
|
||||
GitVersion string `json:"git_version,omitempty"`
|
||||
}
|
||||
|
||||
// SanityCheck checks whether a BatchProof is in a legal format
|
||||
|
||||
@@ -108,7 +108,7 @@ func TestProofDetailHash(t *testing.T) {
|
||||
}
|
||||
hash, err := proofDetail.Hash()
|
||||
assert.NoError(t, err)
|
||||
expectedHash := "72a00232c1fcb100b1b67e6d12cd449e5d2d890e3a66e50f4c23499d4990766f"
|
||||
expectedHash := "d3b57cb84b0da8043373eeb3612806fb7248d6d1b6e089846ccf3ccce2d9f31c"
|
||||
assert.Equal(t, expectedHash, hex.EncodeToString(hash))
|
||||
}
|
||||
|
||||
|
||||
53 common/types/response.go Normal file
@@ -0,0 +1,53 @@
|
||||
package types
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
)
|
||||
|
||||
// Response the response schema
|
||||
type Response struct {
|
||||
ErrCode int `json:"errcode"`
|
||||
ErrMsg string `json:"errmsg"`
|
||||
Data interface{} `json:"data"`
|
||||
}
|
||||
|
||||
// RenderJSON renders response with json
|
||||
func RenderJSON(ctx *gin.Context, errCode int, err error, data interface{}) {
|
||||
var errMsg string
|
||||
if err != nil {
|
||||
errMsg = err.Error()
|
||||
}
|
||||
renderData := Response{
|
||||
ErrCode: errCode,
|
||||
ErrMsg: errMsg,
|
||||
Data: data,
|
||||
}
|
||||
ctx.JSON(http.StatusOK, renderData)
|
||||
}
|
||||
|
||||
// RenderSuccess renders success response with json
|
||||
func RenderSuccess(ctx *gin.Context, data interface{}) {
|
||||
RenderJSON(ctx, Success, nil, data)
|
||||
}
|
||||
|
||||
// RenderFailure renders failure response with json
|
||||
func RenderFailure(ctx *gin.Context, errCode int, err error) {
|
||||
RenderJSON(ctx, errCode, err, nil)
|
||||
}
|
||||
|
||||
// RenderFatal renders fatal response with json
|
||||
func RenderFatal(ctx *gin.Context, err error) {
|
||||
var errMsg string
|
||||
if err != nil {
|
||||
errMsg = err.Error()
|
||||
}
|
||||
renderData := Response{
|
||||
ErrCode: InternalServerError,
|
||||
ErrMsg: errMsg,
|
||||
Data: nil,
|
||||
}
|
||||
ctx.Set("errcode", InternalServerError)
|
||||
ctx.JSON(http.StatusInternalServerError, renderData)
|
||||
}
|
||||
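A small sketch of how a gin handler might use these helpers; the package name, route parameter and `loadTask` lookup are hypothetical, while the error code is one of the constants defined earlier in this package.

```go
package handlers

import (
	"fmt"

	"github.com/gin-gonic/gin"

	"scroll-tech/common/types"
)

// loadTask is a stand-in for a real database lookup.
func loadTask(id string) (interface{}, error) {
	if id == "" {
		return nil, fmt.Errorf("empty task id")
	}
	return map[string]string{"id": id}, nil
}

// getTask renders either the success envelope or a failure envelope with an error code.
func getTask(c *gin.Context) {
	task, err := loadTask(c.Param("id"))
	if err != nil {
		types.RenderFailure(c, types.ErrCoordinatorParameterInvalidNo, err)
		return
	}
	types.RenderSuccess(c, task)
}
```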
27 common/utils/http.go Normal file
@@ -0,0 +1,27 @@
|
||||
package utils
|
||||
|
||||
import (
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
// StartHTTPServer starts a public HTTP server on the given address.
|
||||
func StartHTTPServer(address string, handler http.Handler) (*http.Server, error) {
|
||||
srv := &http.Server{
|
||||
Handler: handler,
|
||||
Addr: address,
|
||||
ReadTimeout: time.Second * 3,
|
||||
WriteTimeout: time.Second * 3,
|
||||
IdleTimeout: time.Second * 12,
|
||||
}
|
||||
errCh := make(chan error, 1)
|
||||
go func() {
|
||||
errCh <- srv.ListenAndServe()
|
||||
}()
|
||||
select {
|
||||
case err := <-errCh:
|
||||
return nil, err
|
||||
case <-time.After(time.Second):
|
||||
}
|
||||
return srv, nil
|
||||
}
|
||||
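A usage sketch (the address and handler are placeholders): StartHTTPServer returns the `*http.Server` once the listener has stayed up for about a second, otherwise it returns the startup error.

```go
package main

import (
	"log"
	"net/http"

	"scroll-tech/common/utils"
)

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/ping", func(w http.ResponseWriter, _ *http.Request) {
		_, _ = w.Write([]byte("pong"))
	})

	srv, err := utils.StartHTTPServer("localhost:8090", mux)
	if err != nil {
		log.Fatalf("failed to start http server: %v", err)
	}
	defer srv.Close()

	// ... the rest of the program runs while the server serves in the background ...
}
```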
@@ -2,6 +2,9 @@ package utils
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/rand"
|
||||
"fmt"
|
||||
"math/big"
|
||||
"time"
|
||||
|
||||
"github.com/modern-go/reflect2"
|
||||
@@ -50,3 +53,9 @@ func Loop(ctx context.Context, period time.Duration, f func()) {
|
||||
func IsNil(i interface{}) bool {
|
||||
return i == nil || reflect2.IsNil(i)
|
||||
}
|
||||
|
||||
// RandomURL returns a localhost endpoint with a random port.
|
||||
func RandomURL() string {
|
||||
id, _ := rand.Int(rand.Reader, big.NewInt(5000-1))
|
||||
return fmt.Sprintf("localhost:%d", 10000+2000+id.Int64())
|
||||
}
|
||||
|
||||
21 common/version/prover_version.go Normal file
@@ -0,0 +1,21 @@
|
||||
package version
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
// CheckScrollProverVersion checks the "scroll-prover" version; if it's different from the local one, it returns false
|
||||
func CheckScrollProverVersion(proverVersion string) bool {
|
||||
// note that the version is in fact in the format of "tag-commit-scroll_prover-halo2",
|
||||
// so split-by-'-' length should be 4
|
||||
remote := strings.Split(proverVersion, "-")
|
||||
if len(remote) != 4 {
|
||||
return false
|
||||
}
|
||||
local := strings.Split(Version, "-")
|
||||
if len(local) != 4 {
|
||||
return false
|
||||
}
|
||||
// compare the `scroll_prover` version
|
||||
return remote[2] == local[2]
|
||||
}
|
||||
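An illustrative call (the import path and version string below are made up): only the third segment, the scroll_prover component, is compared against the local Version, and a remote string that does not split into exactly four '-'-separated parts is rejected outright.

```go
package main

import (
	"fmt"

	"scroll-tech/common/version"
)

func main() {
	// Hypothetical prover-reported version: tag-commit-scroll_prover-halo2.
	remote := "v4.3.9-1a2b3c4-5d6e7f8-9a0b1c2"

	// True only if the third segment matches the third segment of the local version.Version.
	fmt.Println(version.CheckScrollProverVersion(remote))

	// Malformed strings (wrong number of segments) always fail.
	fmt.Println(version.CheckScrollProverVersion("v4.3.9")) // false
}
```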
@@ -3,10 +3,9 @@ package version
|
||||
import (
|
||||
"fmt"
|
||||
"runtime/debug"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var tag = "v4.1.46"
|
||||
var tag = "v4.3.9"
|
||||
|
||||
var commit = func() string {
|
||||
if info, ok := debug.ReadBuildInfo(); ok {
|
||||
@@ -30,19 +29,3 @@ var ZkVersion = "000000-000000"
|
||||
|
||||
// Version denotes the version of the scroll protocol, including the l2geth, relayer, coordinator, prover, contracts, etc.
|
||||
var Version = fmt.Sprintf("%s-%s-%s", tag, commit, ZkVersion)
|
||||
|
||||
// CheckScrollProverVersion check the "scroll-prover" version, if it's different from the local one, return false
|
||||
func CheckScrollProverVersion(proverVersion string) bool {
|
||||
// note the the version is in fact in the format of "tag-commit-scroll_prover-halo2",
|
||||
// so split-by-'-' length should be 4
|
||||
remote := strings.Split(proverVersion, "-")
|
||||
if len(remote) != 4 {
|
||||
return false
|
||||
}
|
||||
local := strings.Split(Version, "-")
|
||||
if len(local) != 4 {
|
||||
return false
|
||||
}
|
||||
// compare the `scroll_prover` version
|
||||
return remote[2] == local[2]
|
||||
}
|
||||
|
||||
@@ -1,78 +1,78 @@
|
||||
# Scroll Contracts
|
||||
|
||||
Note: For more comprehensive documentation, see [`./docs/`](./docs).
|
||||
This directory contains the Solidity code for the Scroll L1 bridge and rollup contracts, as well as the L2 bridge and pre-deployed contracts. The [`specs`](../specs/) folder describes the overall Scroll protocol, including the cross-domain messaging and rollup process. You can also find contract APIs and more details in the [`docs`](./docs) folder.
|
||||
|
||||
## Directory Structure
|
||||
|
||||
```
|
||||
integration-test
|
||||
|- xxx.test.ts - "Hardhat integration tests"
|
||||
lib
|
||||
|- forge-std - "foundry dependency"
|
||||
scripts
|
||||
|- deploy_xxx.ts - "hardhat deploy script"
|
||||
|- foundry - "foundry deploy scripts"
|
||||
src
|
||||
|- test
|
||||
| `- xxx.t.sol - "Unit testi in solidity"
|
||||
`- xxx.sol - "solidity contract"
|
||||
.gitmodules - "foundry dependecy modules"
|
||||
foundry.toml - "configure foundry"
|
||||
hardhat.config.ts - "configure hardhat"
|
||||
remappings.txt - "foundry dependency mappings"
|
||||
<pre>
|
||||
├── <a href="./docs/">docs</a>: Documentation for the contracts
|
||||
├── <a href="./integration-test/">integration-test</a>: Hardhat integration tests
|
||||
├── <a href="./lib/">lib</a>: External libraries and testing tools
|
||||
├── <a href="./scripts">scripts</a>: Deployment scripts
|
||||
├── <a href="./src">src</a>
|
||||
│ ├── <a href="./src/gas-swap/">gas-swap</a>: Utility contract that allows gas payment in other tokens
|
||||
│ ├── <a href="./src/interfaces/">interfaces</a>: Common contract interfaces
|
||||
│ ├── <a href="./src/L1/">L1</a>: Contracts deployed on the L1 (Ethereum)
|
||||
│ │ ├── <a href="./src/L1/gateways/">gateways</a>: Gateway router and token gateway contracts
|
||||
│ │ ├── <a href="./src/L1/rollup/">rollup</a>: Rollup contracts for data availability and finalization
|
||||
│ │ ├── <a href="./src/L1/IL1ScrollMessenger.sol">IL1ScrollMessenger.sol</a>: L1 Scroll messenger interface
|
||||
│ │ └── <a href="./src/L1/L1ScrollMessenger.sol">L1ScrollMessenger.sol</a>: L1 Scroll messenger contract
|
||||
│ ├── <a href="./src/L2/">L2</a>: Contracts deployed on the L2 (Scroll)
|
||||
│ │ ├── <a href="./src/L2/gateways/">gateways</a>: Gateway router and token gateway contracts
|
||||
│ │ ├── <a href="./src/L2/predeploys/">predeploys</a>: Pre-deployed contracts on L2
|
||||
│ │ ├── <a href="./src/L2/IL2ScrollMessenger.sol">IL2ScrollMessenger.sol</a>: L2 Scroll messenger interface
|
||||
│ │ └── <a href="./src/L2/L2ScrollMessenger.sol">L2ScrollMessenger.sol</a>: L2 Scroll messenger contract
|
||||
│ ├── <a href="./src/libraries/">libraries</a>: Shared contract libraries
|
||||
│ ├── <a href="./src/misc/">misc</a>: Miscellaneous contracts
|
||||
│ ├── <a href="./src/mocks/">mocks</a>: Mock contracts used in the testing
|
||||
│ ├── <a href="./src/rate-limiter/">rate-limiter</a>: Rate limiter contract
|
||||
│ └── <a href="./src/test/">test</a>: Unit tests in solidity
|
||||
├── <a href="./foundry.toml">foundry.toml</a>: Foundry configuration
|
||||
├── <a href="./hardhat.config.ts">hardhat.config.ts</a>: Hardhat configuration
|
||||
├── <a href="./remappings.txt">remappings.txt</a>: Foundry dependency mappings
|
||||
...
|
||||
```
|
||||
</pre>
|
||||
|
||||
## Dependencies
|
||||
|
||||
### Node.js
|
||||
|
||||
First install [`Node.js`](https://nodejs.org/en) and [`npm`](https://www.npmjs.com/).
|
||||
Run the following command to install [`yarn`](https://classic.yarnpkg.com/en/):
|
||||
|
||||
```bash
|
||||
npm install --global yarn
|
||||
```
|
||||
|
||||
### Foundry
|
||||
|
||||
First run the command below to get foundryup, the Foundry toolchain installer:
|
||||
Install `foundryup`, the Foundry toolchain installer:
|
||||
|
||||
```bash
|
||||
curl -L https://foundry.paradigm.xyz | bash
|
||||
```
|
||||
|
||||
If you do not want to use the redirect, feel free to manually download the foundryup installation script from [here](https://raw.githubusercontent.com/foundry-rs/foundry/master/foundryup/foundryup).
|
||||
If you do not want to use the redirect, feel free to manually download the `foundryup` installation script from [here](https://raw.githubusercontent.com/foundry-rs/foundry/master/foundryup/foundryup).
|
||||
|
||||
Then, run `foundryup` in a new terminal session or after reloading your `PATH`.
|
||||
Then, run `foundryup` in a new terminal session or after reloading `PATH`.
|
||||
|
||||
Other ways to install Foundry can be found [here](https://github.com/foundry-rs/foundry#installation).
|
||||
|
||||
### Hardhat
|
||||
|
||||
Run the following command to install [Hardhat](https://hardhat.org/) and other dependencies.
|
||||
|
||||
```
|
||||
yarn install
|
||||
```
|
||||
|
||||
## Build
|
||||
|
||||
- Run `git submodule update --init --recursive` to initialise git submodules.
|
||||
- Run `git submodule update --init --recursive` to initialize git submodules.
|
||||
- Run `yarn prettier:solidity` to run linting in fix mode; it will auto-format all Solidity code.
|
||||
- Run `yarn prettier` to run linting in fix mode; it will auto-format all TypeScript code.
|
||||
- Run `yarn prepare` to install the precommit linting hook
|
||||
- Run `yarn prepare` to install the precommit linting hook.
|
||||
- Run `forge build` to compile contracts with foundry.
|
||||
- Run `npx hardhat compile` to compile with hardhat.
|
||||
- Run `forge test -vvv` to run foundry unit tests. It will compile all contracts before running the unit tests.
|
||||
- Run `npx hardhat test` to run integration tests. It may not compile all contracts before running, so it's better to run `npx hardhat compile` first.
|
||||
|
||||
## TODO
|
||||
|
||||
- [ ] unit tests
|
||||
- [ ] L1 Messenger
|
||||
- [x] L1 Gateways
|
||||
- [x] L1 Gateway Router
|
||||
- [ ] L2 Messenger
|
||||
- [x] L2 Gateways
|
||||
- [x] L2 Gateway Router
|
||||
- [x] ScrollStandardERC20Factory
|
||||
- [x] Whitelist
|
||||
- [ ] SimpleGasOracle
|
||||
- [ ] integration tests
|
||||
- [x] ERC20Gateway
|
||||
- [x] GatewayRouter
|
||||
- [ ] ZKRollup contracts
|
||||
- [x] Gas Oracle contracts for cross chain message call
|
||||
- [ ] ERC721/ERC115 interface design
|
||||
- [ ] add proof verification codes
|
||||
- [ ] security analysis
|
||||
|
||||
@@ -1,33 +0,0 @@
|
||||
# Cross Domain Messaging
|
||||
|
||||
Like other layer 2 protocols, Scroll allows dapps to communicate between layer 1 and layer 2. More specifically, dapps on layer 1 can trigger contract functions in layer 2, and vice versa.
|
||||
|
||||
## Message Between L1 and L2
|
||||
|
||||
The Scroll protocol implements two core contracts, `L1ScrollMessenger` and `L2ScrollMessenger`, to enable cross domain messaging. The only entry point for sending a cross domain message is to call the following function:
|
||||
|
||||
```solidity
|
||||
function sendMessage(
|
||||
address _to,
|
||||
bytes memory _message,
|
||||
uint256 _gasLimit
|
||||
) external payable
|
||||
```
|
||||
|
||||
The function is available on both messengers, in layer 1 and layer 2. After that, the Sequencer will handle the rest for you. We explain the detailed workflow in the following docs.
|
||||
|
||||
### Send Message from L1 to L2
|
||||
|
||||
As described above, the first step is to call `L1ScrollMessenger.sendMessage` in layer 1. The `L1ScrollMessenger` contract will emit a `SentMessage` event, which will be noticed by the Sequencer. The Sequencer will wait for the confirmation of the function call in layer 1. Normally, the Sequencer will wait for 10-20 blocks. After that, the Sequencer will initiate a transaction in layer 2, calling the function `L2ScrollMessenger.relayMessage`, and finally the message is executed in layer 2.
|
||||
|
||||
The execution in layer 2 may fail due to an out-of-gas problem. In such a case, one can call `L1ScrollMessenger.replayMessage` to replace the message with a larger gas limit, and the Sequencer will follow the steps above and execute the message again in layer 2.
|
||||
|
||||
### Send Message from L2 to L1
|
||||
|
||||
Similar to sending a message from L1 to L2, you should first call `L2ScrollMessenger.sendMessage` in layer 2. The `L2ScrollMessenger` contract will emit a `SentMessage` event, which will be noticed by the Sequencer. Unlike above, the Sequencer will first batch-submit layer 2 transactions (or blocks) to the `ZKRollup` contract in layer 1. Then the Sequencer will wait for the proof generated by the prover and submit it to the `ZKRollup` contract in layer 1. Finally, anyone can call `L1ScrollMessenger.relayMessageWithProof` with a correct proof to execute the message in layer 1.
|
||||
|
||||
Currently, for safety reasons, we only allow privileged contracts to send cross domain messages, and only privileged accounts can call `L2ScrollMessenger.relayMessage`.
|
||||
|
||||
## Fee For Sending Message
|
||||
|
||||
to be discussed.
|
||||
@@ -1,22 +0,0 @@
|
||||
# Overview
|
||||
|
||||

|
||||
|
||||
The above picture is the overview of the contract design. There are several components both in layer 1 and layer 2: L1/L2 Scroll Messenger, various L1/L2 Gateways and L1/L2 Gateway Router. Besides these, there is a Rollup component only in layer 1.
|
||||
|
||||
The following are the detailed docs for each component (docs are generated automatically by the `@primitivefi/hardhat-dodoc` plugin):
|
||||
|
||||
- [L1 Scroll Messenger](./apis/L1ScrollMessenger.md) and [L2 Scroll Messenger](./apis/L2ScrollMessenger.md): Main entry for sending and relaying cross domain message.
|
||||
- [Rollup](./apis/ZKRollup.md)
|
||||
- [L1 Gateway Router](./apis/L1GatewayRouter.md) and [L2 Gateway Router](./apis/L2GatewayRouter.md): Router contract for depositing/withdrawing Ethers and ERC20 tokens.
|
||||
- L1/L2 Gateways:
|
||||
- [L1 Standard ERC20 Gateway](./apis/L1StandardERC20Gateway.md) and [L2 Standard ERC20 Gateway](./apis/L2StandardERC20Gateway.md)
|
||||
- [L1 WETH Gateway](./apis/L1WETHGateway.md) and [L2 WETH Gateway](./apis/L2WETHGateway.md)
|
||||
- [L1 ERC721 Gateway](./apis/L1ERC721Gateway.md) and [L2 ERC721 Gateway](./apis/L2ERC721Gateway.md)
|
||||
- [L1 ERC1155 Gateway](./apis/L1ERC1155Gateway.md) and [L2 ERC1155 Gateway](./apis/L2ERC1155Gateway.md)
|
||||
- [ScrollStandardERC20Factory](./apis/ScrollStandardERC20Factory.md): The `ScrollStandardERC20` token factory used by `L2StandardERC20Gateway`.
|
||||
|
||||
There are two main applications: Token Bridge and Cross Domain Messaging. You can find the documentation in the links below:
|
||||
|
||||
- [Token Bridge](./TokenBridge.md): moving token from layer 1 to layer 2, or from layer 2 to layer 1.
|
||||
- [Cross Domain Messaging](./CrossDomainMessaging.md): sending data from layer 1 to layer 2, or from layer 2 to layer 1. Basically, it helps to trigger function calls across layers. The token bridge also uses cross domain messaging to achieve its functionality.
|
||||
@@ -1,134 +0,0 @@
|
||||
# Bridge Token Between Layer 1 and Layer 2
|
||||
|
||||
The Token Bridge of the Scroll protocol offers a way to move assets from layer 1 to layer 2 and back, including Ether, ERC20 tokens, ERC-721 tokens, ERC-1155 tokens, etc. The asset is deposited and locked in layer 1 in exchange for the same amount of an equivalent token on layer 2. For example, if you deposit 1000 Ether in layer 1, you will get 1000 Ether in layer 2 in return. And if you withdraw 1000 Ether in layer 2, you will get 1000 Ether back in layer 1.
|
||||
|
||||
Ether and ERC20 tokens can be deposited or withdrawn using a single contract, `GatewayRouter` (`L1GatewayRouter` in layer 1 and `L2GatewayRouter` in layer 2). ERC-721 and ERC-1155 tokens can be deposited or withdrawn using the corresponding `ERC1155Gateway` and `ERC721Gateway` in layer 1 or layer 2 (they may be integrated into `GatewayRouter` in the future).
|
||||
|
||||
## Bridge Ether
|
||||
|
||||
To bridge Ether from layer 1 to layer 2, one can use `L1GatewayRouter.depositETH`. This will transfer Ether to the `L1ScrollMessenger` contract on layer 1 and credit the same amount of Ether to the specified address in layer 2.
|
||||
|
||||
```solidity
|
||||
function depositETH(uint256 _gasLimit) external payable;
|
||||
|
||||
function depositETH(address _to, uint256 _gasLimit) external payable;
|
||||
|
||||
```
|
||||
|
||||
In layer 1, all deposited Ether will be locked in the `L1ScrollMessenger` contract. This means your deposited Ether will first be transferred to the `L1GatewayRouter` contract and then to the `L1ScrollMessenger` contract.
|
||||
|
||||
To withdraw Ether from layer 2 to layer 1, one can use `L2GatewayRouter.withdrawETH`.
|
||||
|
||||
```solidity
|
||||
function withdrawETH(uint256 _gasLimit) external payable;
|
||||
|
||||
function withdrawETH(address _to, uint256 _gasLimit) external payable;
|
||||
|
||||
```
|
||||
|
||||
In layer 2, the `L2ScrollMessenger` holds an infinite amount of Ether at the beginning. All your withdrawn Ether will be transferred back to the `L2ScrollMessenger`, just like the process in layer 1.
|
||||
|
||||
In addition, you can actually call `sendMessage` on the `L1ScrollMessenger` or `L2ScrollMessenger` contract to deposit or withdraw Ether. The `L1GatewayRouter.depositETH` and `L2GatewayRouter.withdrawETH` functions are just aliases for `L1ScrollMessenger/L2ScrollMessenger.sendMessage`.
|
||||
|
||||
## Bridge ERC20 Tokens
|
||||
|
||||
We use a similar design to the [Arbitrum protocol](https://developer.offchainlabs.com/docs/bridging_assets#bridging-erc20-tokens). Several gateway contracts are used to bridge different kinds of ERC20 tokens, such as Wrapped Ether, standard ERC20 tokens, etc.
|
||||
|
||||
We implement a `StandardERC20Gateway` to deposit and withdraw standard ERC20 tokens. The standard procedure to deposit ERC20 tokens is to call `L1GatewayRouter.depositERC20` in layer 1; the token will be locked in the `L1StandardERC20Gateway` contract in layer 1. The standard procedure to withdraw ERC20 tokens is to call `L2GatewayRouter.withdrawERC20` in layer 2, and the token will be burned in layer 2.
|
||||
|
||||
For many other non-standard ERC20 tokens, we provide a custom ERC20 gateway. Anyone can implement such a gateway as long as it implements all required interfaces. We implement the Wrapped Ether gateway as an example. To deposit or withdraw Wrapped Ether, one should first unwrap it to Ether, then transfer the Ether to the `ScrollMessenger`, just like Ether bridging.
|
||||
|
||||
### Passing data when depositing ERC20 tokens
|
||||
|
||||
The Scroll protocol offers a way to call another contract after depositing the token in layer 2, by calling `L1GatewayRouter.depositERC20AndCall` in layer 1. The ERC20 token in layer 2 implements the [ERC 677 Standard](https://github.com/ethereum/EIPs/issues/677). By using the `transferAndCall` function, we can transfer the token to the corresponding recipient in layer 2 and then call the recipient with the passed data.
|
||||
|
||||
```solidity
|
||||
function depositERC20AndCall(
|
||||
address _token,
|
||||
address _to,
|
||||
uint256 _amount,
|
||||
bytes memory _data,
|
||||
uint256 _gasLimit
|
||||
) external;
|
||||
|
||||
```
|
||||
|
||||
Like bridging Ether, all of the above functionality can also be achieved by calling the corresponding function in the ERC20Gateway contract.
|
||||
|
||||
## Bridge ERC-721/ERC-1155 Tokens
|
||||
|
||||
Depositing and withdrawing ERC-721 or ERC-1155 tokens works very similarly to ERC20 tokens. One can use the following functions to deposit ERC-721/ERC-1155 tokens in layer 1.
|
||||
|
||||
```solidity
|
||||
function depositERC1155(
|
||||
address _token,
|
||||
uint256 _tokenId,
|
||||
uint256 _amount,
|
||||
uint256 _gasLimit
|
||||
) external;
|
||||
|
||||
function depositERC1155(
|
||||
address _token,
|
||||
address _to,
|
||||
uint256 _tokenId,
|
||||
uint256 _amount,
|
||||
uint256 _gasLimit
|
||||
) external;
|
||||
|
||||
function depositERC721(
|
||||
address _token,
|
||||
uint256 _tokenId,
|
||||
uint256 _gasLimit
|
||||
) external;
|
||||
|
||||
function depositERC721(
|
||||
address _token,
|
||||
address _to,
|
||||
uint256 _tokenId,
|
||||
uint256 _gasLimit
|
||||
) external;
|
||||
|
||||
```
|
||||
|
||||
One can use the following functions to withdraw ERC-721/ERC-1155 tokens in layer 2.
|
||||
|
||||
```solidity
|
||||
function withdrawERC1155(
|
||||
address _token,
|
||||
uint256 _tokenId,
|
||||
uint256 _amount,
|
||||
uint256 _gasLimit
|
||||
) external;
|
||||
|
||||
function withdrawERC1155(
|
||||
address _token,
|
||||
address _to,
|
||||
uint256 _tokenId,
|
||||
uint256 _amount,
|
||||
uint256 _gasLimit
|
||||
) external;
|
||||
|
||||
function withdrawERC721(
|
||||
address _token,
|
||||
uint256 _tokenId,
|
||||
uint256 _gasLimit
|
||||
) external;
|
||||
|
||||
function withdrawERC721(
|
||||
address _token,
|
||||
address _to,
|
||||
uint256 _tokenId,
|
||||
uint256 _gasLimit
|
||||
) external;
|
||||
|
||||
```
|
||||
|
||||
To save gas, we also provide batch deposit/withdraw functions, such as `batchDepositERC1155` and `batchDepositERC721`, which take a list of token ids.
|
||||
|
||||
## Drop Depositing/Withdrawing
|
||||
|
||||
Coming soon...
|
||||
|
||||
## Force Exit
|
||||
|
||||
Coming soon...
|
||||
@@ -260,6 +260,23 @@ function owner() external view returns (address)
|
||||
*Returns the address of the current owner.*
|
||||
|
||||
|
||||
#### Returns
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _0 | address | undefined |
|
||||
|
||||
### rateLimiter
|
||||
|
||||
```solidity
|
||||
function rateLimiter() external view returns (address)
|
||||
```
|
||||
|
||||
The address of token rate limiter contract.
|
||||
|
||||
|
||||
|
||||
|
||||
#### Returns
|
||||
|
||||
| Name | Type | Description |
|
||||
@@ -354,6 +371,22 @@ function transferOwnership(address newOwner) external nonpayable
|
||||
|---|---|---|
|
||||
| newOwner | address | undefined |
|
||||
|
||||
### updateRateLimiter
|
||||
|
||||
```solidity
|
||||
function updateRateLimiter(address _newRateLimiter) external nonpayable
|
||||
```
|
||||
|
||||
Update rate limiter contract.
|
||||
|
||||
*This function can only be called by the contract owner.*
|
||||
|
||||
#### Parameters
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _newRateLimiter | address | The address of new rate limiter contract. |
|
||||
|
||||
### updateTokenMapping
|
||||
|
||||
```solidity
|
||||
@@ -530,6 +563,23 @@ Emitted when some ERC1155 token is refunded.
|
||||
| tokenId | uint256 | undefined |
|
||||
| amount | uint256 | undefined |
|
||||
|
||||
### UpdateRateLimiter
|
||||
|
||||
```solidity
|
||||
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
|
||||
```
|
||||
|
||||
Emitted when owner updates rate limiter contract.
|
||||
|
||||
|
||||
|
||||
#### Parameters
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _oldRateLimiter `indexed` | address | undefined |
|
||||
| _newRateLimiter `indexed` | address | undefined |
|
||||
|
||||
### UpdateTokenMapping
|
||||
|
||||
```solidity
|
||||
|
||||
@@ -227,6 +227,23 @@ function owner() external view returns (address)
|
||||
*Returns the address of the current owner.*
|
||||
|
||||
|
||||
#### Returns
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _0 | address | undefined |
|
||||
|
||||
### rateLimiter
|
||||
|
||||
```solidity
|
||||
function rateLimiter() external view returns (address)
|
||||
```
|
||||
|
||||
The address of token rate limiter contract.
|
||||
|
||||
|
||||
|
||||
|
||||
#### Returns
|
||||
|
||||
| Name | Type | Description |
|
||||
@@ -299,6 +316,22 @@ function transferOwnership(address newOwner) external nonpayable
|
||||
|---|---|---|
|
||||
| newOwner | address | undefined |
|
||||
|
||||
### updateRateLimiter
|
||||
|
||||
```solidity
|
||||
function updateRateLimiter(address _newRateLimiter) external nonpayable
|
||||
```
|
||||
|
||||
Update rate limiter contract.
|
||||
|
||||
*This function can only be called by the contract owner.*
|
||||
|
||||
#### Parameters
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _newRateLimiter | address | The address of new rate limiter contract. |
|
||||
|
||||
### updateTokenMapping
|
||||
|
||||
```solidity
|
||||
@@ -469,6 +502,23 @@ Emitted when some ERC721 token is refunded.
|
||||
| recipient `indexed` | address | undefined |
|
||||
| tokenId | uint256 | undefined |
|
||||
|
||||
### UpdateRateLimiter
|
||||
|
||||
```solidity
|
||||
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
|
||||
```
|
||||
|
||||
Emitted when owner updates rate limiter contract.
|
||||
|
||||
|
||||
|
||||
#### Parameters
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _oldRateLimiter `indexed` | address | undefined |
|
||||
| _newRateLimiter `indexed` | address | undefined |
|
||||
|
||||
### UpdateTokenMapping
|
||||
|
||||
```solidity
|
||||
|
||||
@@ -93,28 +93,6 @@ Mapping from L1 message hash to drop status.
|
||||
|
||||
|
||||
|
||||
#### Parameters
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _0 | bytes32 | undefined |
|
||||
|
||||
#### Returns
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _0 | bool | undefined |
|
||||
|
||||
### isL1MessageSent
|
||||
|
||||
```solidity
|
||||
function isL1MessageSent(bytes32) external view returns (bool)
|
||||
```
|
||||
|
||||
Mapping from L1 message hash to sent status.
|
||||
|
||||
|
||||
|
||||
#### Parameters
|
||||
|
||||
| Name | Type | Description |
|
||||
@@ -183,6 +161,28 @@ The address of L1MessageQueue contract.
|
||||
|---|---|---|
|
||||
| _0 | address | undefined |
|
||||
|
||||
### messageSendTimestamp
|
||||
|
||||
```solidity
|
||||
function messageSendTimestamp(bytes32) external view returns (uint256)
|
||||
```
|
||||
|
||||
Mapping from L1 message hash to the timestamp when the message is sent.
|
||||
|
||||
|
||||
|
||||
#### Parameters
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _0 | bytes32 | undefined |
|
||||
|
||||
#### Returns
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _0 | uint256 | undefined |
|
||||
|
||||
### owner
|
||||
|
||||
```solidity
|
||||
@@ -239,6 +239,23 @@ Mapping from queue index to previous replay queue index.
|
||||
|---|---|---|
|
||||
| _0 | uint256 | undefined |
|
||||
|
||||
### rateLimiter
|
||||
|
||||
```solidity
|
||||
function rateLimiter() external view returns (address)
|
||||
```
|
||||
|
||||
The address of ETH rate limiter contract.
|
||||
|
||||
|
||||
|
||||
|
||||
#### Returns
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _0 | address | undefined |
|
||||
|
||||
### relayMessageWithProof
|
||||
|
||||
```solidity
|
||||
@@ -436,6 +453,22 @@ Update max replay times.
|
||||
|---|---|---|
|
||||
| _newMaxReplayTimes | uint256 | The new max replay times. |
|
||||
|
||||
### updateRateLimiter
|
||||
|
||||
```solidity
|
||||
function updateRateLimiter(address _newRateLimiter) external nonpayable
|
||||
```
|
||||
|
||||
Update rate limiter contract.
|
||||
|
||||
*This function can only be called by the contract owner.*
|
||||
|
||||
#### Parameters
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _newRateLimiter | address | The address of new rate limiter contract. |
|
||||
|
||||
### xDomainMessageSender
|
||||
|
||||
```solidity
|
||||
@@ -609,5 +642,22 @@ Emitted when the maximum number of times each message can be replayed is updated
|
||||
| oldMaxReplayTimes | uint256 | undefined |
|
||||
| newMaxReplayTimes | uint256 | undefined |
|
||||
|
||||
### UpdateRateLimiter
|
||||
|
||||
```solidity
|
||||
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
|
||||
```
|
||||
|
||||
Emitted when owner updates rate limiter contract.
|
||||
|
||||
|
||||
|
||||
#### Parameters
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _oldRateLimiter `indexed` | address | undefined |
|
||||
| _newRateLimiter `indexed` | address | undefined |
|
||||
|
||||
|
||||
|
||||
|
||||
@@ -225,6 +225,23 @@ function owner() external view returns (address)
|
||||
*Returns the address of the current owner.*
|
||||
|
||||
|
||||
#### Returns
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _0 | address | undefined |
|
||||
|
||||
### rateLimiter
|
||||
|
||||
```solidity
|
||||
function rateLimiter() external view returns (address)
|
||||
```
|
||||
|
||||
The address of token rate limiter contract.
|
||||
|
||||
|
||||
|
||||
|
||||
#### Returns
|
||||
|
||||
| Name | Type | Description |
|
||||
@@ -275,6 +292,22 @@ function transferOwnership(address newOwner) external nonpayable
|
||||
|---|---|---|
|
||||
| newOwner | address | undefined |
|
||||
|
||||
### updateRateLimiter
|
||||
|
||||
```solidity
|
||||
function updateRateLimiter(address _newRateLimiter) external nonpayable
|
||||
```
|
||||
|
||||
Update rate limiter contract.
|
||||
|
||||
*This function can only be called by the contract owner.*
|
||||
|
||||
#### Parameters
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _newRateLimiter | address | The address of new rate limiter contract. |
|
||||
|
||||
|
||||
|
||||
## Events
|
||||
@@ -372,5 +405,22 @@ Emitted when some ERC20 token is refunded.
|
||||
| recipient `indexed` | address | undefined |
|
||||
| amount | uint256 | undefined |
|
||||
|
||||
### UpdateRateLimiter
|
||||
|
||||
```solidity
|
||||
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
|
||||
```
|
||||
|
||||
Emitted when owner updates rate limiter contract.
|
||||
|
||||
|
||||
|
||||
#### Parameters
|
||||
|
||||
| Name | Type | Description |
|
||||
|---|---|---|
|
||||
| _oldRateLimiter `indexed` | address | undefined |
|
||||
| _newRateLimiter `indexed` | address | undefined |
|
||||
|
||||
|
||||
|
||||
|
||||
Some files were not shown because too many files have changed in this diff.