Mirror of https://github.com/scroll-tech/scroll.git (synced 2026-01-11 15:08:09 -05:00)

Compare commits: v4.1.24 ... docs/bridg (133 commits)
Commit SHA1s in this compare (133 total; author and date columns not shown):

82330621ce, 237d048ce6, 38b440b1fa, 0ed3d68fc3, 2847d02498, a7d9bf3488, 255eb5f307, 0955382aec,
07961f751e, bbd5a5a9c2, 2e1f42fcb6, bcdbe1f119, 6034c43bb1, f9da81d587, 38f64e70b7, 44b924170a,
227f09a2cf, 112e82a4ef, 82e6d28e82, 8daa5d5496, 3958e8bd86, f553a70d20, 2dc5ceb44c, ff03924d76,
f6894bb82f, e990e02391, f8d48a6326, dba097e03d, 30ad0bfe78, 1dfca3b7c0, 826e847b5a, 8c71a6d22a,
87f18efba8, fecd129a39, 663156984f, 3499c595e7, 95d2df46e3, 102d29c54d, 7d50699344, e08b800d1d,
6139ca0df0, 24a0fd08ac, 2840485f38, dab21fc712, c44b7f7bf4, a8c71b5e36, ae2f62df00, ce5c6e0aa3,
e8ddf99184, ebf2b429a3, db46ce408d, 3d1a8374d0, 574aa68491, 589388b288, af8175800f, e6793a85f5,
7e9f3b7376, 8b611b443a, 1f62596b0a, a2b9878733, e166dfba48, d7d62eb8e6, 61eb75fbb6, e9af5912ac,
c3b2f13fe0, 0d847e0427, 7de221ba89, 16fd840896, 20fb098a01, b734282a44, 30bbfc0229, 60bea74444,
ae2e010324, 767a2cbfdf, 74e5de156e, becfd41b0e, 8542a176ed, ea40517527, 1f151f6ae5, ca76835712,
247dd24a8f, b006eaa33e, ee4d7c3549, 3c3f56b9b0, 95121093c8, b85a109fd3, c091081f70, e42397867b,
9a5ad83121, ba90865ec6, 66f3b42d24, a9a6b7464a, 24898602de, 623213a67a, 8e8a9c0351, 974930f051,
9654d76356, 373e98ff3c, 34c3b91fd7, 3adf077070, 2a4d6e05a1, 0ee99024bb, 0a3401772e, 69d2d2b7c2,
e86399b5d4, b1e3b28dc6, 0a9641be70, 4a3056667e, a78c8cf1df, d68c6b7757, e6fe25f10f, 251c706c04,
55658c6f52, 3328000919, 2ec9a4afae, a1ebb3e42a, 58c0511a94, ba129ba161, 8b58bd1eab, 7097bdb4e1,
c3ae79bff4, e8f5251ae2, d10438ae0f, a7ce900aa7, 49ec0b8fa8, 244e5e915a, dc53d6d022, 6dc89a9c0b,
9e450a7b0f, af76593c1d, a2ae697f79, 41b07bd05a, 90dc0911d3
.github/pull_request_template.md (2 lines changed, vendored)

@@ -20,7 +20,7 @@ Your PR title must follow [conventional commits](https://www.conventionalcommits

### Deployment tag versioning

Has `tag` in `common/version.go` been updated?
Has `tag` in `common/version.go` been updated or have you added `bump-version` label to this PR?

- [ ] No, this PR doesn't involve a new deployment, git tag, docker image tag
- [ ] Yes
.github/scripts/bump_version_dot_go.mjs (new file, 37 lines, vendored)

@@ -0,0 +1,37 @@
import { URL } from "url";
import { readFileSync, writeFileSync } from "fs";

const versionFilePath = new URL(
  "../../common/version/version.go",
  import.meta.url
).pathname;

const versionFileContent = readFileSync(versionFilePath, { encoding: "utf-8" });

const currentVersion = versionFileContent.match(
  /var tag = "(?<version>v(?<major>\d+)\.(?<minor>\d+)\.(?<patch>\d+))"/
);

try {
  parseInt(currentVersion.groups.major);
  parseInt(currentVersion.groups.minor);
  parseInt(currentVersion.groups.patch);
} catch (err) {
  console.error(new Error("Failed to parse version in version.go file"));
  throw err;
}

// prettier-ignore
const newVersion = `v${currentVersion.groups.major}.${currentVersion.groups.minor}.${parseInt(currentVersion.groups.patch) + 1}`;

console.log(
  `Bump version from ${currentVersion.groups.version} to ${newVersion}`
);

writeFileSync(
  versionFilePath,
  versionFileContent.replace(
    `var tag = "${currentVersion.groups.version}"`,
    `var tag = "${newVersion}"`
  )
);
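The script above only rewrites the `var tag = "vX.Y.Z"` line in `common/version/version.go`. For context, a minimal Go sketch of the shape that file is assumed to have (the actual file is not shown in this diff; only the tag line is implied by the regex, and the `Version` variable is implied by `version.Version` being referenced in the cmd apps later in this compare):

// Sketch of common/version/version.go (assumed layout, for illustration only).
package version

// tag is the line matched and rewritten by bump_version_dot_go.mjs:
//   var tag = "v<major>.<minor>.<patch>"
var tag = "v4.1.24"

// Version is referenced as version.Version by the cmd apps in this diff;
// deriving it directly from tag here is an assumption.
var Version = tag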
.github/workflows/bridge.yml (2 lines changed, vendored)

@@ -10,6 +10,7 @@ on:
    paths:
      - 'bridge/**'
      - 'common/**'
      - '!common/version/version.go'
      - 'database/**'
      - '.github/workflows/bridge.yml'
  pull_request:
@@ -21,6 +22,7 @@ on:
    paths:
      - 'bridge/**'
      - 'common/**'
      - '!common/version/version.go'
      - 'database/**'
      - '.github/workflows/bridge.yml'
.github/workflows/bump_version.yml (new file, 63 lines, vendored)

@@ -0,0 +1,63 @@
name: Bump Version

on:
  pull_request:
    branches: [ develop ]
    types:
      - opened
      - reopened
      - synchronize
      - ready_for_review
      - labeled

jobs:
  try-to-bump:
    if: contains(github.event.pull_request.labels.*.name, 'bump-version')
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v3
        with:
          ref: ${{ github.head_ref }}
      - name: check diff
        id: check_diff
        run: |
          set -euo pipefail

          # fetch develop branch so that we can diff against later
          git fetch origin develop

          echo 'checking verion changes in diff...'

          # check if version changed in version.go
          # note: the grep will fail if use \d instead of [0-9]
          git diff HEAD..origin/develop --text --no-ext-diff --unified=0 --no-prefix common/version/version.go | grep -E '^\+var tag = "v[0-9]+\.[0-9]+\.[0-9]+"$' && true

          exit_code=$?

          # auto bump if version is not bumped manually
          echo '> require auto version bump?'

          if [ $exit_code -eq 0 ]; then
            echo '> no, already bumped'
            echo "result=no-bump" >> "$GITHUB_OUTPUT"
          else
            echo '> yes'
            echo "result=bump" >> "$GITHUB_OUTPUT"
          fi
      - name: Install Node.js 16
        if: steps.check_diff.outputs.result == 'bump'
        uses: actions/setup-node@v3
        with:
          node-version: 16
      - name: bump version in common/version/version.go
        if: steps.check_diff.outputs.result == 'bump'
        run: node .github/scripts/bump_version_dot_go.mjs

      # Commits made by this Action do not trigger new Workflow runs
      - uses: stefanzweifel/git-auto-commit-action@3ea6ae190baf489ba007f7c92608f33ce20ef04a
        if: steps.check_diff.outputs.result == 'bump'
        with:
          skip_fetch: true # already did fetch in check diff
          file_pattern: "common/version/version.go"
          commit_message: "chore: auto version bump [bot]"
.github/workflows/common.yml (2 lines changed, vendored)

@@ -9,6 +9,7 @@ on:
      - alpha
    paths:
      - 'common/**'
      - '!common/version/version.go'
      - '.github/workflows/common.yml'
  pull_request:
    types:
@@ -18,6 +19,7 @@ on:
      - ready_for_review
    paths:
      - 'common/**'
      - '!common/version/version.go'
      - '.github/workflows/common.yml'

jobs:
.github/workflows/coordinator.yml (2 lines changed, vendored)

@@ -10,6 +10,7 @@ on:
    paths:
      - 'coordinator/**'
      - 'common/**'
      - '!common/version/version.go'
      - 'database/**'
      - '.github/workflows/coordinator.yml'
  pull_request:
@@ -21,6 +22,7 @@ on:
    paths:
      - 'coordinator/**'
      - 'common/**'
      - '!common/version/version.go'
      - 'database/**'
      - '.github/workflows/coordinator.yml'
.github/workflows/database.yml (2 lines changed, vendored)

@@ -10,6 +10,7 @@ on:
    paths:
      - 'database/**'
      - 'common/**'
      - '!common/version/version.go'
      - '.github/workflows/database.yml'
  pull_request:
    types:
@@ -20,6 +21,7 @@ on:
    paths:
      - 'database/**'
      - 'common/**'
      - '!common/version/version.go'
      - '.github/workflows/database.yml'

jobs:
.github/workflows/docker.yaml (90 lines changed, vendored)

@@ -6,7 +6,7 @@ on:
      - v**

jobs:
  build-and-push:
  event_watcher:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
@@ -27,6 +27,18 @@ jobs:
          tags: scrolltech/event-watcher:${{github.ref_name}}
          # cache-from: type=gha,scope=${{ github.workflow }}
          # cache-to: type=gha,scope=${{ github.workflow }}
  gas_oracle:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      - name: Login to Docker Hub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Build and push gas_oracle docker
        uses: docker/build-push-action@v2
        with:
@@ -36,15 +48,18 @@ jobs:
          tags: scrolltech/gas-oracle:${{github.ref_name}}
          # cache-from: type=gha,scope=${{ github.workflow }}
          # cache-to: type=gha,scope=${{ github.workflow }}
      - name: Build and push msg_relayer docker
        uses: docker/build-push-action@v2
  rollup_relayer:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      - name: Login to Docker Hub
        uses: docker/login-action@v2
        with:
          context: .
          file: ./build/dockerfiles/msg_relayer.Dockerfile
          push: true
          tags: scrolltech/msg-relayer:${{github.ref_name}}
          # cache-from: type=gha,scope=${{ github.workflow }}
          # cache-to: type=gha,scope=${{ github.workflow }}
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Build and push rollup_relayer docker
        uses: docker/build-push-action@v2
        with:
@@ -54,6 +69,18 @@ jobs:
          tags: scrolltech/rollup-relayer:${{github.ref_name}}
          # cache-from: type=gha,scope=${{ github.workflow }}
          # cache-to: type=gha,scope=${{ github.workflow }}
  bridgehistoryapi-cross-msg-fetcher:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      - name: Login to Docker Hub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Build and push bridgehistoryapi-cross-msg-fetcher docker
        uses: docker/build-push-action@v2
        with:
@@ -63,6 +90,18 @@ jobs:
          tags: scrolltech/bridgehistoryapi-cross-msg-fetcher:${{github.ref_name}}
          # cache-from: type=gha,scope=${{ github.workflow }}
          # cache-to: type=gha,scope=${{ github.workflow }}
  bridgehistoryapi-server:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      - name: Login to Docker Hub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Build and push bridgehistoryapi-server docker
        uses: docker/build-push-action@v2
        with:
@@ -72,6 +111,18 @@ jobs:
          tags: scrolltech/bridgehistoryapi-server:${{github.ref_name}}
          # cache-from: type=gha,scope=${{ github.workflow }}
          # cache-to: type=gha,scope=${{ github.workflow }}
  coordinator:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      - name: Login to Docker Hub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Build and push coordinator docker
        uses: docker/build-push-action@v2
        with:
@@ -81,3 +132,24 @@ jobs:
          tags: scrolltech/coordinator:${{github.ref_name}}
          # cache-from: type=gha,scope=${{ github.workflow }}
          # cache-to: type=gha,scope=${{ github.workflow }}
  prover-stats-api:
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v2
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v2
      - name: Login to Docker Hub
        uses: docker/login-action@v2
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      - name: Build and push prover-stats-api docker
        uses: docker/build-push-action@v2
        with:
          context: .
          file: ./build/dockerfiles/prover-stats-api.Dockerfile
          push: true
          tags: scrolltech/prover-stats-api:${{github.ref_name}}
          # cache-from: type=gha,scope=${{ github.workflow }}
          # cache-to: type=gha,scope=${{ github.workflow }}
Makefile (16 lines changed)

@@ -1,5 +1,7 @@
.PHONY: check update dev_docker build_test_docker run_test_docker clean

L2GETH_TAG=scroll-v4.3.55

help: ## Display this help message
	@grep -h \
		-E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | \
@@ -15,14 +17,14 @@ lint: ## The code's format and security checks.

update: ## update dependencies
	go work sync
	cd $(PWD)/bridge/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.18 && go mod tidy
	cd $(PWD)/bridge/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
	cd $(PWD)/bridge-history-api/ && go get -u github.com/ethereum/go-ethereum@latest && go mod tidy
	cd $(PWD)/common/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.18 && go mod tidy
	cd $(PWD)/coordinator/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.18 && go mod tidy
	cd $(PWD)/database/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.18 && go mod tidy
	cd $(PWD)/prover/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.18 && go mod tidy
	cd $(PWD)/prover-stats-api/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.18 && go mod tidy
	cd $(PWD)/tests/integration-test/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.18 && go mod tidy
	cd $(PWD)/common/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
	cd $(PWD)/coordinator/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
	cd $(PWD)/database/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
	cd $(PWD)/prover/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
	cd $(PWD)/prover-stats-api/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
	cd $(PWD)/tests/integration-test/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
	goimports -local $(PWD)/bridge/ -w .
	goimports -local $(PWD)/bridge-history-api/ -w .
	goimports -local $(PWD)/common/ -w .
@@ -89,9 +89,6 @@ var (
	// L2FailedRelayedMessageEventSignature = keccak256("FailedRelayedMessage(bytes32)")
	L2FailedRelayedMessageEventSignature common.Hash

	// L2ImportBlockEventSignature = keccak256("ImportBlock(bytes32,uint256,uint256,uint256,bytes32)")
	L2ImportBlockEventSignature common.Hash

	// L2AppendMessageEventSignature = keccak256("AppendMessage(uint256,bytes32)")
	L2AppendMessageEventSignature common.Hash
)
@@ -153,8 +150,6 @@ func init() {
	L2RelayedMessageEventSignature = L2ScrollMessengerABI.Events["RelayedMessage"].ID
	L2FailedRelayedMessageEventSignature = L2ScrollMessengerABI.Events["FailedRelayedMessage"].ID

	L2ImportBlockEventSignature = L1BlockContainerABI.Events["ImportBlock"].ID

	L2AppendMessageEventSignature = L2MessageQueueABI.Events["AppendMessage"].ID

}
@@ -95,6 +95,9 @@ func (b *BatchInfoFetcher) fetchBatchInfo() error {
	} else {
		startHeight = latestBatchHeight + 1
	}
	if startHeight < b.batchInfoStartNumber {
		startHeight = b.batchInfoStartNumber
	}
	for from := startHeight; number >= from; from += fetchLimit {
		to := from + fetchLimit - 1
		// number - confirmation can never less than 0 since the for loop condition
@@ -3,7 +3,7 @@ module bridge-history-api
go 1.19

require (
	github.com/ethereum/go-ethereum v1.12.0
	github.com/ethereum/go-ethereum v1.12.2
	github.com/gin-contrib/cors v1.4.0
	github.com/gin-gonic/gin v1.9.1
	github.com/mattn/go-colorable v0.1.13
@@ -33,13 +33,13 @@ require (
	github.com/consensys/bavard v0.1.13 // indirect
	github.com/consensys/gnark-crypto v0.10.0 // indirect
	github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
	github.com/crate-crypto/go-kzg-4844 v0.2.0 // indirect
	github.com/crate-crypto/go-kzg-4844 v0.3.0 // indirect
	github.com/davecgh/go-spew v1.1.1 // indirect
	github.com/deckarep/golang-set/v2 v2.1.0 // indirect
	github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
	github.com/deepmap/oapi-codegen v1.8.2 // indirect
	github.com/docker/docker v23.0.6+incompatible // indirect
	github.com/ethereum/c-kzg-4844 v0.2.0 // indirect
	github.com/ethereum/c-kzg-4844 v0.3.1 // indirect
	github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect
	github.com/fsnotify/fsnotify v1.6.0 // indirect
	github.com/gabriel-vasile/mimetype v1.4.2 // indirect
@@ -61,6 +61,7 @@ require (
	github.com/gorilla/websocket v1.5.0 // indirect
	github.com/graph-gophers/graphql-go v1.3.0 // indirect
	github.com/hashicorp/go-bexpr v0.1.10 // indirect
	github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7 // indirect
	github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
	github.com/holiman/uint256 v1.2.3 // indirect
	github.com/huin/goupnp v1.0.3 // indirect
@@ -103,7 +104,7 @@ require (
	github.com/russross/blackfriday/v2 v2.1.0 // indirect
	github.com/shirou/gopsutil v3.21.11+incompatible // indirect
	github.com/status-im/keycard-go v0.2.0 // indirect
	github.com/supranational/blst v0.3.11-0.20230406105308-e9dfc5ee724b // indirect
	github.com/supranational/blst v0.3.11 // indirect
	github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
	github.com/tklauser/go-sysconf v0.3.11 // indirect
	github.com/tklauser/numcpus v0.6.0 // indirect
@@ -113,12 +114,12 @@ require (
	github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
	github.com/yusufpapurcu/wmi v1.2.2 // indirect
	golang.org/x/arch v0.4.0 // indirect
	golang.org/x/crypto v0.11.0 // indirect
	golang.org/x/exp v0.0.0-20230206171751-46f607a40771 // indirect
	golang.org/x/net v0.12.0 // indirect
	golang.org/x/crypto v0.12.0 // indirect
	golang.org/x/exp v0.0.0-20230810033253-352e893a4cad // indirect
	golang.org/x/net v0.14.0 // indirect
	golang.org/x/sync v0.3.0 // indirect
	golang.org/x/sys v0.10.0 // indirect
	golang.org/x/text v0.11.0 // indirect
	golang.org/x/sys v0.11.0 // indirect
	golang.org/x/text v0.12.0 // indirect
	golang.org/x/time v0.3.0 // indirect
	golang.org/x/tools v0.11.0 // indirect
	google.golang.org/protobuf v1.31.0 // indirect
@@ -66,8 +66,8 @@ github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee
|
||||
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/crate-crypto/go-kzg-4844 v0.2.0 h1:UVuHOE+5tIWrim4zf/Xaa43+MIsDCPyW76QhUpiMGj4=
|
||||
github.com/crate-crypto/go-kzg-4844 v0.2.0/go.mod h1:SBP7ikXEgDnUPONgm33HtuDZEDtWa3L4QtN1ocJSEQ4=
|
||||
github.com/crate-crypto/go-kzg-4844 v0.3.0 h1:UBlWE0CgyFqqzTI+IFyCzA7A3Zw4iip6uzRv5NIXG0A=
|
||||
github.com/crate-crypto/go-kzg-4844 v0.3.0/go.mod h1:SBP7ikXEgDnUPONgm33HtuDZEDtWa3L4QtN1ocJSEQ4=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
|
||||
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
@@ -94,10 +94,10 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m
|
||||
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
|
||||
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
|
||||
github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
|
||||
github.com/ethereum/c-kzg-4844 v0.2.0 h1:+cUvymlnoDDQgMInp25Bo3OmLajmmY8mLJ/tLjqd77Q=
|
||||
github.com/ethereum/c-kzg-4844 v0.2.0/go.mod h1:WI2Nd82DMZAAZI1wV2neKGost9EKjvbpQR9OqE5Qqa8=
|
||||
github.com/ethereum/go-ethereum v1.12.0 h1:bdnhLPtqETd4m3mS8BGMNvBTf36bO5bx/hxE2zljOa0=
|
||||
github.com/ethereum/go-ethereum v1.12.0/go.mod h1:/oo2X/dZLJjf2mJ6YT9wcWxa4nNJDBKDBU6sFIpx1Gs=
|
||||
github.com/ethereum/c-kzg-4844 v0.3.1 h1:sR65+68+WdnMKxseNWxSJuAv2tsUrihTpVBTfM/U5Zg=
|
||||
github.com/ethereum/c-kzg-4844 v0.3.1/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
|
||||
github.com/ethereum/go-ethereum v1.12.2 h1:eGHJ4ij7oyVqUQn48LBz3B7pvQ8sV0wGJiIE6gDq/6Y=
|
||||
github.com/ethereum/go-ethereum v1.12.2/go.mod h1:1cRAEV+rp/xX0zraSCBnu9Py3HQ+geRMj3HdR+k0wfI=
|
||||
github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8=
|
||||
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
|
||||
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
|
||||
@@ -212,6 +212,8 @@ github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpx
|
||||
github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0=
|
||||
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
|
||||
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
|
||||
github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7 h1:3JQNjnMRil1yD0IfZKHF9GxxWKDJGj8I0IqOUol//sw=
|
||||
github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc=
|
||||
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
|
||||
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
|
||||
github.com/holiman/uint256 v1.2.3 h1:K8UWO1HUJpRMXBxbmaY1Y8IAMZC/RsKB+ArEnnK4l5o=
|
||||
@@ -431,8 +433,8 @@ github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
|
||||
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY=
|
||||
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/supranational/blst v0.3.11-0.20230406105308-e9dfc5ee724b h1:u49mjRnygnB34h8OKbnNJFVUtWSKIKb1KukdV8bILUM=
|
||||
github.com/supranational/blst v0.3.11-0.20230406105308-e9dfc5ee724b/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
|
||||
github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4=
|
||||
github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
|
||||
github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM=
|
||||
@@ -491,11 +493,11 @@ golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm
|
||||
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
|
||||
golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA=
|
||||
golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
|
||||
golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk=
|
||||
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20230206171751-46f607a40771 h1:xP7rWLUr1e1n2xkK5YB4LI0hPEy3LJC6Wk+D4pGlOJg=
|
||||
golang.org/x/exp v0.0.0-20230206171751-46f607a40771/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
|
||||
golang.org/x/exp v0.0.0-20230810033253-352e893a4cad h1:g0bG7Z4uG+OgH2QDODnjp6ggkk1bJDsINcuWmJN1iJU=
|
||||
golang.org/x/exp v0.0.0-20230810033253-352e893a4cad/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
|
||||
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
|
||||
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
|
||||
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
|
||||
@@ -529,8 +531,8 @@ golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96b
|
||||
golang.org/x/net v0.0.0-20211008194852-3b03d305991f/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50=
|
||||
golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
|
||||
golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14=
|
||||
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
@@ -585,8 +587,8 @@ golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
|
||||
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
|
||||
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
@@ -598,8 +600,8 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4=
|
||||
golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc=
|
||||
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
|
||||
|
||||
@@ -1,6 +1,5 @@
.PHONY: lint docker clean bridge
.PHONY: mock_abi bridge_bins event_watcher gas_oracle rollup_relayer test lint clean docker

IMAGE_NAME=bridge
IMAGE_VERSION=latest
REPO_ROOT_DIR=./..

@@ -11,7 +10,6 @@ mock_abi:
bridge_bins: ## Builds the Bridge bins.
	go build -o $(PWD)/build/bin/event_watcher ./cmd/event_watcher/
	go build -o $(PWD)/build/bin/gas_oracle ./cmd/gas_oracle/
	go build -o $(PWD)/build/bin/message_relayer ./cmd/msg_relayer/
	go build -o $(PWD)/build/bin/rollup_relayer ./cmd/rollup_relayer/

event_watcher: ## Builds the event_watcher bin
@@ -20,9 +18,6 @@ event_watcher: ## Builds the event_watcher bin
gas_oracle: ## Builds the gas_oracle bin
	go build -o $(PWD)/build/bin/gas_oracle ./cmd/gas_oracle/

message_relayer: ## Builds the message_relayer bin
	go build -o $(PWD)/build/bin/message_relayer ./cmd/msg_relayer/

rollup_relayer: ## Builds the rollup_relayer bin
	go build -o $(PWD)/build/bin/rollup_relayer ./cmd/rollup_relayer/

@@ -39,10 +34,8 @@ docker_push:
	docker docker push scrolltech/gas-oracle:${IMAGE_VERSION}
	docker docker push scrolltech/event-watcher:${IMAGE_VERSION}
	docker docker push scrolltech/rollup-relayer:${IMAGE_VERSION}
	docker docker push scrolltech/msg-relayer:${IMAGE_VERSION}

docker:
	DOCKER_BUILDKIT=1 docker build -t scrolltech/gas-oracle:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/gas_oracle.Dockerfile
	DOCKER_BUILDKIT=1 docker build -t scrolltech/event-watcher:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/event_watcher.Dockerfile
	DOCKER_BUILDKIT=1 docker build -t scrolltech/rollup-relayer:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/rollup_relayer.Dockerfile
	DOCKER_BUILDKIT=1 docker build -t scrolltech/msg-relayer:${IMAGE_VERSION} ${REPO_ROOT_DIR}/ -f ${REPO_ROOT_DIR}/build/dockerfiles/msg_relayer.Dockerfile
@@ -2,11 +2,6 @@

This repo contains the Scroll bridge.

In addition, launching the bridge will launch a separate instance of l2geth, and sets up a communication channel
between the two, over JSON-RPC sockets.

Something we should pay attention is that all private keys inside sender instance cannot be duplicated.

## Dependency

+ install `abigen`
@@ -19,18 +14,26 @@ go install -v github.com/scroll-tech/go-ethereum/cmd/abigen

```bash
make clean
make bridge
make mock_abi
make bridge_bins
```

## Start
* use default ports and config.json

(Note: make sure you use different private keys for different senders in config.json.)

* use default ports and config.json.

```bash
./build/bin/bridge --http
./build/bin/event_watcher --http
./build/bin/gas_oracle --http
./build/bin/rollup_relayer --http
```

* use specified ports and config.json

```bash
./build/bin/bridge --config ./config.json --http --http.addr localhost --http.port 8290
./build/bin/event_watcher --config ./config.json --http --http.addr localhost --http.port 8290
./build/bin/gas_oracle --config ./config.json --http --http.addr localhost --http.port 8290
./build/bin/rollup_relayer --config ./config.json --http --http.addr localhost --http.port 8290
```
@@ -11,8 +11,6 @@ import (
var (
	// ScrollChainABI holds information about ScrollChain's context and available invokable methods.
	ScrollChainABI *abi.ABI
	// L1ScrollMessengerABI holds information about L1ScrollMessenger's context and available invokable methods.
	L1ScrollMessengerABI *abi.ABI
	// L1MessageQueueABI holds information about L1MessageQueue contract's context and available invokable methods.
	L1MessageQueueABI *abi.ABI
	// L2GasPriceOracleABI holds information about L2GasPriceOracle's context and available invokable methods.
@@ -20,25 +18,15 @@ var (

	// L2ScrollMessengerABI holds information about L2ScrollMessenger's context and available invokable methods.
	L2ScrollMessengerABI *abi.ABI
	// L1BlockContainerABI holds information about L1BlockContainer contract's context and available invokable methods.
	L1BlockContainerABI *abi.ABI
	// L1GasPriceOracleABI holds information about L1GasPriceOracle's context and available invokable methods.
	L1GasPriceOracleABI *abi.ABI
	// L2MessageQueueABI holds information about L2MessageQueue contract's context and available invokable methods.
	L2MessageQueueABI *abi.ABI

	// L1SentMessageEventSignature = keccak256("SentMessage(address,address,uint256,uint256,uint256,bytes)")
	L1SentMessageEventSignature common.Hash
	// L1RelayedMessageEventSignature = keccak256("RelayedMessage(bytes32)")
	L1RelayedMessageEventSignature common.Hash
	// L1FailedRelayedMessageEventSignature = keccak256("FailedRelayedMessage(bytes32)")
	L1FailedRelayedMessageEventSignature common.Hash

	// L1CommitBatchEventSignature = keccak256("CommitBatch(uint256,bytes32)")
	L1CommitBatchEventSignature common.Hash
	// L1FinalizeBatchEventSignature = keccak256("FinalizeBatch(uint256,bytes32,bytes32,bytes32)")
	L1FinalizeBatchEventSignature common.Hash

	// L1QueueTransactionEventSignature = keccak256("QueueTransaction(address,address,uint256,uint64,uint256,bytes)")
	L1QueueTransactionEventSignature common.Hash

@@ -49,28 +37,19 @@ var (
	// L2FailedRelayedMessageEventSignature = keccak256("FailedRelayedMessage(bytes32)")
	L2FailedRelayedMessageEventSignature common.Hash

	// L2ImportBlockEventSignature = keccak256("ImportBlock(bytes32,uint256,uint256,uint256,bytes32)")
	L2ImportBlockEventSignature common.Hash

	// L2AppendMessageEventSignature = keccak256("AppendMessage(uint256,bytes32)")
	L2AppendMessageEventSignature common.Hash
)

func init() {
	ScrollChainABI, _ = ScrollChainMetaData.GetAbi()
	L1ScrollMessengerABI, _ = L1ScrollMessengerMetaData.GetAbi()
	L1MessageQueueABI, _ = L1MessageQueueMetaData.GetAbi()
	L2GasPriceOracleABI, _ = L2GasPriceOracleMetaData.GetAbi()

	L2ScrollMessengerABI, _ = L2ScrollMessengerMetaData.GetAbi()
	L1BlockContainerABI, _ = L1BlockContainerMetaData.GetAbi()
	L2MessageQueueABI, _ = L2MessageQueueMetaData.GetAbi()
	L1GasPriceOracleABI, _ = L1GasPriceOracleMetaData.GetAbi()

	L1SentMessageEventSignature = L1ScrollMessengerABI.Events["SentMessage"].ID
	L1RelayedMessageEventSignature = L1ScrollMessengerABI.Events["RelayedMessage"].ID
	L1FailedRelayedMessageEventSignature = L1ScrollMessengerABI.Events["FailedRelayedMessage"].ID

	L1CommitBatchEventSignature = ScrollChainABI.Events["CommitBatch"].ID
	L1FinalizeBatchEventSignature = ScrollChainABI.Events["FinalizeBatch"].ID

@@ -80,8 +59,6 @@ func init() {
	L2RelayedMessageEventSignature = L2ScrollMessengerABI.Events["RelayedMessage"].ID
	L2FailedRelayedMessageEventSignature = L2ScrollMessengerABI.Events["FailedRelayedMessage"].ID

	L2ImportBlockEventSignature = L1BlockContainerABI.Events["ImportBlock"].ID

	L2AppendMessageEventSignature = L2MessageQueueABI.Events["AppendMessage"].ID
}
@@ -11,21 +11,13 @@ import (
func TestEventSignature(t *testing.T) {
	assert := assert.New(t)

	assert.Equal(L1SentMessageEventSignature, common.HexToHash("104371f3b442861a2a7b82a070afbbaab748bb13757bf47769e170e37809ec1e"))
	assert.Equal(L1RelayedMessageEventSignature, common.HexToHash("4641df4a962071e12719d8c8c8e5ac7fc4d97b927346a3d7a335b1f7517e133c"))
	assert.Equal(L1FailedRelayedMessageEventSignature, common.HexToHash("99d0e048484baa1b1540b1367cb128acd7ab2946d1ed91ec10e3c85e4bf51b8f"))

	assert.Equal(L1CommitBatchEventSignature, common.HexToHash("2c32d4ae151744d0bf0b9464a3e897a1d17ed2f1af71f7c9a75f12ce0d28238f"))
	assert.Equal(L1FinalizeBatchEventSignature, common.HexToHash("26ba82f907317eedc97d0cbef23de76a43dd6edb563bdb6e9407645b950a7a2d"))

	assert.Equal(L1QueueTransactionEventSignature, common.HexToHash("69cfcb8e6d4192b8aba9902243912587f37e550d75c1fa801491fce26717f37e"))

	assert.Equal(L2SentMessageEventSignature, common.HexToHash("104371f3b442861a2a7b82a070afbbaab748bb13757bf47769e170e37809ec1e"))
	assert.Equal(L2RelayedMessageEventSignature, common.HexToHash("4641df4a962071e12719d8c8c8e5ac7fc4d97b927346a3d7a335b1f7517e133c"))
	assert.Equal(L2FailedRelayedMessageEventSignature, common.HexToHash("99d0e048484baa1b1540b1367cb128acd7ab2946d1ed91ec10e3c85e4bf51b8f"))

	assert.Equal(L2ImportBlockEventSignature, common.HexToHash("a7823f45e1ee21f9530b77959b57507ad515a14fa9fa24d262ee80e79b2b5745"))

	assert.Equal(L2AppendMessageEventSignature, common.HexToHash("faa617c2d8ce12c62637dbce76efcc18dae60574aa95709bdcedce7e76071693"))
}

@@ -117,12 +109,3 @@ func TestPackSetL2BaseFee(t *testing.T) {
	_, err = l2GasOracleABI.Pack("setL2BaseFee", baseFee)
	assert.NoError(err)
}

func TestPackImportBlock(t *testing.T) {
	assert := assert.New(t)

	l1BlockContainerABI := L1BlockContainerABI

	_, err := l1BlockContainerABI.Pack("importBlockHeader", common.Hash{}, []byte{}, false)
	assert.NoError(err)
}
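The hex constants asserted in TestEventSignature are simply the keccak256 hashes of the canonical event signatures listed in the `// ... = keccak256("...")` comments on the variables above. A standalone Go sketch (not part of this diff; it assumes the golang.org/x/crypto/sha3 package is available) that reproduces one of them:

package main

import (
	"fmt"

	"golang.org/x/crypto/sha3"
)

func main() {
	// Canonical signature from the comment on L1SentMessageEventSignature above.
	sig := "SentMessage(address,address,uint256,uint256,uint256,bytes)"
	h := sha3.NewLegacyKeccak256()
	h.Write([]byte(sig))
	// Per the test assertion above, this should print
	// 104371f3b442861a2a7b82a070afbbaab748bb13757bf47769e170e37809ec1e.
	fmt.Printf("%x\n", h.Sum(nil))
}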
@@ -7,6 +7,7 @@ import (
	"os/signal"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/scroll-tech/go-ethereum/ethclient"
	"github.com/scroll-tech/go-ethereum/log"
	"github.com/urfave/cli/v2"
@@ -59,8 +60,8 @@ func action(ctx *cli.Context) error {
		}
	}()

	// Start metrics server.
	metrics.Serve(subCtx, ctx)
	registry := prometheus.DefaultRegisterer
	metrics.Server(ctx, registry.(*prometheus.Registry))
	l1client, err := ethclient.Dial(cfg.L1Config.Endpoint)
	if err != nil {
		log.Error("failed to connect l1 geth", "config file", cfgFile, "error", err)
@@ -72,8 +73,12 @@ func action(ctx *cli.Context) error {
		log.Error("failed to connect l2 geth", "config file", cfgFile, "error", err)
		return err
	}
	l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations, cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db)
	l2watcher := watcher.NewL2WatcherClient(ctx.Context, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db)

	l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations,
		cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db, registry)

	l2watcher := watcher.NewL2WatcherClient(ctx.Context, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress,
		cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db, registry)

	go utils.Loop(subCtx, 10*time.Second, func() {
		if loopErr := l1watcher.FetchContractEvent(); loopErr != nil {
@@ -84,6 +89,7 @@ func action(ctx *cli.Context) error {
	// Start l2 watcher process
	go utils.Loop(subCtx, 2*time.Second, l2watcher.FetchContractEvent)
	// Finish start all l2 functions

	log.Info("Start event-watcher successfully")

	// Catch CTRL-C to ensure a graceful shutdown.
@@ -7,6 +7,7 @@ import (
	"os/signal"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/scroll-tech/go-ethereum/ethclient"
	"github.com/scroll-tech/go-ethereum/log"
	"github.com/urfave/cli/v2"
@@ -61,8 +62,8 @@ func action(ctx *cli.Context) error {
		}
	}()

	// Start metrics server.
	metrics.Serve(subCtx, ctx)
	registry := prometheus.DefaultRegisterer
	metrics.Server(ctx, registry.(*prometheus.Registry))

	l1client, err := ethclient.Dial(cfg.L1Config.Endpoint)
	if err != nil {
@@ -77,15 +78,14 @@ func action(ctx *cli.Context) error {
		return err
	}

	l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations,
		cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db)
	l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db, registry)

	l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig)
	l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig, registry)
	if err != nil {
		log.Error("failed to create new l1 relayer", "config file", cfgFile, "error", err)
		return err
	}
	l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, false /* initGenesis */)
	l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, false /* initGenesis */, registry)
	if err != nil {
		log.Error("failed to create new l2 relayer", "config file", cfgFile, "error", err)
		return err
@@ -1,93 +0,0 @@
package app

import (
	"context"
	"fmt"
	"os"
	"os/signal"
	"time"

	"github.com/scroll-tech/go-ethereum/log"
	"github.com/urfave/cli/v2"

	"scroll-tech/common/database"
	"scroll-tech/common/metrics"
	"scroll-tech/common/utils"
	"scroll-tech/common/version"

	"scroll-tech/bridge/internal/config"
	"scroll-tech/bridge/internal/controller/relayer"
)

var app *cli.App

func init() {
	// Set up message-relayer app info.
	app = cli.NewApp()
	app.Action = action
	app.Name = "message-relayer"
	app.Usage = "The Scroll Message Relayer"
	app.Description = "Message Relayer contains two main service: 1) relay l1 message to l2. 2) relay l2 message to l1."
	app.Version = version.Version
	app.Flags = append(app.Flags, utils.CommonFlags...)
	app.Commands = []*cli.Command{}
	app.Before = func(ctx *cli.Context) error {
		return utils.LogSetup(ctx)
	}
	// Register `message-relayer-test` app for integration-test.
	utils.RegisterSimulation(app, utils.MessageRelayerApp)
}

func action(ctx *cli.Context) error {
	// Load config file.
	cfgFile := ctx.String(utils.ConfigFileFlag.Name)
	cfg, err := config.NewConfig(cfgFile)
	if err != nil {
		log.Crit("failed to load config file", "config file", cfgFile, "error", err)
	}

	subCtx, cancel := context.WithCancel(ctx.Context)
	// Init db connection
	db, err := database.InitDB(cfg.DBConfig)
	if err != nil {
		log.Crit("failed to init db connection", "err", err)
	}
	defer func() {
		cancel()
		if err = database.CloseDB(db); err != nil {
			log.Error("can not close ormFactory", "error", err)
		}
	}()

	// Start metrics server.
	metrics.Serve(subCtx, ctx)

	l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig)
	if err != nil {
		log.Error("failed to create new l1 relayer", "config file", cfgFile, "error", err)
		return err
	}

	// Start l1relayer process
	go utils.Loop(subCtx, 10*time.Second, l1relayer.ProcessSavedEvents)

	// Finish start all message relayer functions
	log.Info("Start message-relayer successfully")

	// Catch CTRL-C to ensure a graceful shutdown.
	interrupt := make(chan os.Signal, 1)
	signal.Notify(interrupt, os.Interrupt)

	// Wait until the interrupt signal is received from an OS signal.
	<-interrupt

	return nil
}

// Run message_relayer cmd instance.
func Run() {
	if err := app.Run(os.Args); err != nil {
		_, _ = fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
@@ -1,7 +0,0 @@
package main

import "scroll-tech/bridge/cmd/msg_relayer/app"

func main() {
	app.Run()
}
@@ -7,6 +7,7 @@ import (
	"os/signal"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/scroll-tech/go-ethereum/ethclient"
	"github.com/scroll-tech/go-ethereum/log"
	"github.com/urfave/cli/v2"
@@ -62,8 +63,8 @@ func action(ctx *cli.Context) error {
		}
	}()

	// Start metrics server.
	metrics.Serve(subCtx, ctx)
	registry := prometheus.DefaultRegisterer
	metrics.Server(ctx, registry.(*prometheus.Registry))

	// Init l2geth connection
	l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
@@ -73,26 +74,26 @@ func action(ctx *cli.Context) error {
	}

	initGenesis := ctx.Bool(utils.ImportGenesisFlag.Name)
	l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, initGenesis)
	l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, initGenesis, registry)
	if err != nil {
		log.Error("failed to create l2 relayer", "config file", cfgFile, "error", err)
		return err
	}

	chunkProposer := watcher.NewChunkProposer(subCtx, cfg.L2Config.ChunkProposerConfig, db)
	chunkProposer := watcher.NewChunkProposer(subCtx, cfg.L2Config.ChunkProposerConfig, db, registry)
	if err != nil {
		log.Error("failed to create chunkProposer", "config file", cfgFile, "error", err)
		return err
	}

	batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, db)
	batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, db, registry)
	if err != nil {
		log.Error("failed to create batchProposer", "config file", cfgFile, "error", err)
		return err
	}

	l2watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress,
		cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db)
		cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db, registry)

	// Watcher loop to fetch missing blocks
	go utils.LoopWithContext(subCtx, 2*time.Second, func(ctx context.Context) {
@@ -106,11 +107,11 @@ func action(ctx *cli.Context) error {

	go utils.Loop(subCtx, 2*time.Second, chunkProposer.TryProposeChunk)

	go utils.Loop(subCtx, 2*time.Second, batchProposer.TryProposeBatch)
	go utils.Loop(subCtx, 10*time.Second, batchProposer.TryProposeBatch)

	go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessPendingBatches)

	go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessCommittedBatches)
	go utils.Loop(subCtx, 15*time.Second, l2relayer.ProcessCommittedBatches)

	// Finish start all rollup relayer functions.
	log.Info("Start rollup-relayer successfully")
@@ -2,12 +2,10 @@
  "l1_config": {
    "confirmations": "0x6",
    "endpoint": "DUMMY_ENDPOINT",
    "l1_messenger_address": "0x0000000000000000000000000000000000000000",
    "l1_message_queue_address": "0x0000000000000000000000000000000000000000",
    "scroll_chain_address": "0x0000000000000000000000000000000000000000",
    "start_height": 0,
    "relayer_config": {
      "messenger_contract_address": "0x0000000000000000000000000000000000000000",
      "gas_price_oracle_address": "0x0000000000000000000000000000000000000000",
      "sender_config": {
        "endpoint": "/var/lib/jenkins/workspace/SequencerPipeline/MyPrivateNetwork/geth.ipc",
@@ -27,12 +25,7 @@
        "gas_price_diff": 50000
      },
      "finalize_batch_interval_sec": 0,
      "message_sender_private_keys": [
        "1212121212121212121212121212121212121212121212121212121212121212"
      ],
      "gas_oracle_sender_private_keys": [
        "1313131313131313131313131313131313131313131313131313131313131313"
      ]
      "gas_oracle_sender_private_key": "1313131313131313131313131313131313131313131313131313131313131313"
    }
  },
  "l2_config": {
@@ -42,7 +35,6 @@
    "l2_message_queue_address": "0x0000000000000000000000000000000000000000",
    "relayer_config": {
      "rollup_contract_address": "0x0000000000000000000000000000000000000000",
      "messenger_contract_address": "0x0000000000000000000000000000000000000000",
      "gas_price_oracle_address": "0x0000000000000000000000000000000000000000",
      "sender_config": {
        "endpoint": "DUMMY_ENDPOINT",
@@ -61,23 +53,21 @@
        "min_gas_price": 0,
        "gas_price_diff": 50000
      },
      "chain_monitor": {
        "timeout": 3,
        "try_times": 5,
        "base_url": "http://localhost:8750"
      },
      "finalize_batch_interval_sec": 0,
      "message_sender_private_keys": [
        "1212121212121212121212121212121212121212121212121212121212121212"
      ],
      "gas_oracle_sender_private_keys": [
        "1313131313131313131313131313131313131313131313131313131313131313"
      ],
      "rollup_sender_private_keys": [
        "1414141414141414141414141414141414141414141414141414141414141414"
      ]
      "gas_oracle_sender_private_key": "1313131313131313131313131313131313131313131313131313131313131313",
      "commit_sender_private_key": "1414141414141414141414141414141414141414141414141414141414141414",
      "finalize_sender_private_key": "1515151515151515151515151515151515151515151515151515151515151515",
      "gas_cost_increase_multiplier": 1.2
    },
    "chunk_proposer_config": {
      "max_tx_gas_per_chunk": 1123456,
      "max_l2_tx_num_per_chunk": 1123,
      "max_tx_num_per_chunk": 1123,
      "max_l1_commit_gas_per_chunk": 11234567,
      "max_l1_commit_calldata_size_per_chunk": 112345,
      "min_l1_commit_calldata_size_per_chunk": 11234,
      "chunk_timeout_sec": 300,
      "max_row_consumption_per_chunk": 1048319,
      "gas_cost_increase_multiplier": 1.2
@@ -86,7 +76,6 @@
      "max_chunk_num_per_batch": 112,
      "max_l1_commit_gas_per_batch": 11234567,
      "max_l1_commit_calldata_size_per_batch": 112345,
      "min_chunk_num_per_batch": 11,
      "batch_timeout_sec": 300,
      "gas_cost_increase_multiplier": 1.2
    }
@@ -4,24 +4,35 @@ go 1.19

require (
	github.com/agiledragon/gomonkey/v2 v2.9.0
	github.com/orcaman/concurrent-map v1.0.0
	github.com/gin-gonic/gin v1.9.1
	github.com/go-resty/resty/v2 v2.7.0
	github.com/orcaman/concurrent-map/v2 v2.0.1
	github.com/scroll-tech/go-ethereum v1.10.14-0.20230804022247-26eeb40ea3ca
	github.com/prometheus/client_golang v1.14.0
	github.com/scroll-tech/go-ethereum v1.10.14-0.20230829000527-f883dcdc21fc
	github.com/smartystreets/goconvey v1.8.0
	github.com/stretchr/testify v1.8.3
	github.com/urfave/cli/v2 v2.25.7
	golang.org/x/sync v0.3.0
	gorm.io/gorm v1.25.2
)

require (
	github.com/beorn7/perks v1.0.1 // indirect
	github.com/btcsuite/btcd v0.20.1-beta // indirect
	github.com/bytedance/sonic v1.9.2 // indirect
	github.com/cespare/xxhash/v2 v2.2.0 // indirect
	github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
	github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
	github.com/davecgh/go-spew v1.1.1 // indirect
	github.com/deckarep/golang-set v1.8.0 // indirect
	github.com/gabriel-vasile/mimetype v1.4.2 // indirect
	github.com/gin-contrib/sse v0.1.0 // indirect
	github.com/go-ole/go-ole v1.2.6 // indirect
	github.com/go-playground/locales v0.14.1 // indirect
	github.com/go-playground/universal-translator v0.18.1 // indirect
	github.com/go-playground/validator/v10 v10.14.1 // indirect
	github.com/go-stack/stack v1.8.1 // indirect
	github.com/goccy/go-json v0.10.2 // indirect
	github.com/golang/protobuf v1.5.2 // indirect
	github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
	github.com/google/uuid v1.3.0 // indirect
	github.com/gopherjs/gopherjs v1.17.2 // indirect
@@ -32,13 +43,23 @@ require (
	github.com/jackpal/go-nat-pmp v1.0.2 // indirect
	github.com/jinzhu/inflection v1.0.0 // indirect
	github.com/jinzhu/now v1.1.5 // indirect
	github.com/json-iterator/go v1.1.12 // indirect
	github.com/jtolds/gls v4.20.0+incompatible // indirect
	github.com/klauspost/cpuid/v2 v2.2.5 // indirect
	github.com/kr/pretty v0.3.1 // indirect
	github.com/leodido/go-urn v1.2.4 // indirect
	github.com/mattn/go-colorable v0.1.13 // indirect
	github.com/mattn/go-isatty v0.0.19 // indirect
	github.com/mattn/go-runewidth v0.0.14 // indirect
	github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
	github.com/mitchellh/mapstructure v1.5.0 // indirect
	github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
	github.com/modern-go/reflect2 v1.0.2 // indirect
	github.com/pelletier/go-toml/v2 v2.0.8 // indirect
	github.com/pmezard/go-difflib v1.0.0 // indirect
	github.com/prometheus/client_model v0.3.0 // indirect
	github.com/prometheus/common v0.39.0 // indirect
	github.com/prometheus/procfs v0.9.0 // indirect
	github.com/rivo/uniseg v0.4.4 // indirect
	github.com/rjeczalik/notify v0.9.1 // indirect
	github.com/rogpeppe/go-internal v1.10.0 // indirect
@@ -49,13 +70,19 @@ require (
	github.com/status-im/keycard-go v0.2.0 // indirect
	github.com/tklauser/go-sysconf v0.3.11 // indirect
	github.com/tklauser/numcpus v0.6.0 // indirect
	github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
	github.com/tyler-smith/go-bip39 v1.1.0 // indirect
	github.com/ugorji/go/codec v1.2.11 // indirect
	github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
	github.com/yusufpapurcu/wmi v1.2.2 // indirect
	golang.org/x/crypto v0.11.0 // indirect
	golang.org/x/sys v0.10.0 // indirect
	golang.org/x/arch v0.4.0 // indirect
	golang.org/x/crypto v0.12.0 // indirect
	golang.org/x/net v0.14.0 // indirect
	golang.org/x/sync v0.3.0 // indirect
	golang.org/x/sys v0.11.0 // indirect
	golang.org/x/text v0.12.0 // indirect
	golang.org/x/time v0.3.0 // indirect
	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
	google.golang.org/protobuf v1.31.0 // indirect
	gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
	gopkg.in/yaml.v3 v3.0.1 // indirect
)
111 bridge/go.sum
@@ -2,6 +2,8 @@ github.com/VictoriaMetrics/fastcache v1.6.0 h1:C/3Oi3EiBCqufydp1neRZkqcwmEiuRT9c
|
||||
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
|
||||
github.com/agiledragon/gomonkey/v2 v2.9.0 h1:PDiKKybR596O6FHW+RVSG0Z7uGCBNbmbUXh3uCNQ7Hc=
|
||||
github.com/agiledragon/gomonkey/v2 v2.9.0/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
|
||||
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
|
||||
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
|
||||
@@ -11,13 +13,20 @@ github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVa
|
||||
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
|
||||
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
|
||||
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
|
||||
github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM=
|
||||
github.com/bytedance/sonic v1.9.2 h1:GDaNjuWSGu09guE9Oql0MSTNhNCLlWwO8y/xM5BzcbM=
|
||||
github.com/bytedance/sonic v1.9.2/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U=
|
||||
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
|
||||
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
|
||||
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams=
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
|
||||
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
|
||||
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/deckarep/golang-set v1.8.0 h1:sk9/l/KqpunDwP7pSjUg0keiOOLEnOBHzykLrsPppp4=
|
||||
@@ -25,14 +34,38 @@ github.com/deckarep/golang-set v1.8.0/go.mod h1:5nI87KwE7wgsBU1F4GKAw2Qod7p5kyS3
|
||||
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
|
||||
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
|
||||
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
|
||||
github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
|
||||
github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
|
||||
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
|
||||
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
|
||||
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
|
||||
github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg=
|
||||
github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU=
|
||||
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
|
||||
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
|
||||
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
|
||||
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
|
||||
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
|
||||
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
|
||||
github.com/go-playground/validator/v10 v10.14.1 h1:9c50NUPC30zyuKprjL3vNZ0m5oG+jU0zvx4AqHGnv4k=
|
||||
github.com/go-playground/validator/v10 v10.14.1/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
|
||||
github.com/go-resty/resty/v2 v2.7.0 h1:me+K9p3uhSmXtrBZ4k9jcEAfJmuC8IivWHwaLZwPrFY=
|
||||
github.com/go-resty/resty/v2 v2.7.0/go.mod h1:9PWDzw47qPphMRFfhsyk0NnSgvluHcljSMVIq3w7q0I=
|
||||
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
|
||||
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
|
||||
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
|
||||
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
|
||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
|
||||
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
|
||||
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
|
||||
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
|
||||
@@ -59,17 +92,21 @@ github.com/jinzhu/inflection v1.0.0/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkr
|
||||
github.com/jinzhu/now v1.1.5 h1:/o9tlHleP7gOFmsnYNz3RGnqzefHA47wQpKrrdTIwXQ=
|
||||
github.com/jinzhu/now v1.1.5/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
|
||||
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
|
||||
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
|
||||
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
|
||||
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
|
||||
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
|
||||
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
|
||||
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
|
||||
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
|
||||
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
|
||||
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
|
||||
github.com/mattn/go-colorable v0.1.13 h1:fFA4WZxdEF4tXPZVKMLwD8oUnCTTo08duU7wxecdEvA=
|
||||
github.com/mattn/go-colorable v0.1.13/go.mod h1:7S9/ev0klgBDR4GtXTXX8a3vIGJpMovkB8vQcUbaXHg=
|
||||
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||
@@ -77,21 +114,36 @@ github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APP
|
||||
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
|
||||
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/orcaman/concurrent-map v1.0.0 h1:I/2A2XPCb4IuQWcQhBhSwGfiuybl/J0ev9HDbW65HOY=
|
||||
github.com/orcaman/concurrent-map v1.0.0/go.mod h1:Lu3tH6HLW3feq74c2GC+jIMS/K2CFcDWnWD9XkenwhI=
|
||||
github.com/orcaman/concurrent-map/v2 v2.0.1 h1:jOJ5Pg2w1oeB6PeDurIYf6k9PQ+aTITr/6lP/L/zp6c=
|
||||
github.com/orcaman/concurrent-map/v2 v2.0.1/go.mod h1:9Eq3TG2oBe5FirmYWQfYO5iH1q0Jv47PLaNK++uCdOM=
|
||||
github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ=
|
||||
github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4=
|
||||
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
|
||||
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
|
||||
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
|
||||
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
|
||||
github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI=
|
||||
github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y=
|
||||
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
|
||||
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
|
||||
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
|
||||
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
|
||||
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
|
||||
@@ -104,8 +156,8 @@ github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncj
|
||||
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230804022247-26eeb40ea3ca h1:aQKsL9FiCPB6Eo3nuyxmw6aZpAhmFvJsRb7XGtXnOUc=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230804022247-26eeb40ea3ca/go.mod h1:DiN3p2inoXOxGffxSswDKqWjQ7bU+Mp0c9v0XQXKmaA=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230829000527-f883dcdc21fc h1:eK3NOpjgm/b2TQ6rYqWx92Zri0kBuxf6gKjjsVxWKr8=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230829000527-f883dcdc21fc/go.mod h1:DiN3p2inoXOxGffxSswDKqWjQ7bU+Mp0c9v0XQXKmaA=
|
||||
github.com/scroll-tech/zktrie v0.6.0 h1:xLrMAO31Yo2BiPg1jtYKzcjpEFnXy8acbB7iIsyshPs=
|
||||
github.com/scroll-tech/zktrie v0.6.0/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
|
||||
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
|
||||
@@ -118,6 +170,15 @@ github.com/smartystreets/goconvey v1.8.0 h1:Oi49ha/2MURE0WexF052Z0m+BNSGirfjg5RL
|
||||
github.com/smartystreets/goconvey v1.8.0/go.mod h1:EdX8jtrTIj26jmjCOVNMVSIYAtgexqXKHOXW2Dx9JLg=
|
||||
github.com/status-im/keycard-go v0.2.0 h1:QDLFswOQu1r5jsycloeQh3bVU8n/NatHHaZobtDnDzA=
|
||||
github.com/status-im/keycard-go v0.2.0/go.mod h1:wlp8ZLbsmrF6g6WjugPAx+IzoLrkdf9+mHxBEeo3Hbg=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
|
||||
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY=
|
||||
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
|
||||
@@ -125,23 +186,34 @@ github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+Kd
|
||||
github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI=
|
||||
github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms=
|
||||
github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4=
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
|
||||
github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
|
||||
github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U=
|
||||
github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU=
|
||||
github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
|
||||
github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs=
|
||||
github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 h1:bAn7/zixMGCfxrRTfdpNzjtPYqr8smhKouy9mxVdGPU=
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673/go.mod h1:N3UwUGtsrSj3ccvlPHLoLsHnpR27oXr4ZE984MbSER8=
|
||||
github.com/yusufpapurcu/wmi v1.2.2 h1:KBNDSne4vP5mbSWnJbO+51IMOXJB67QiYCSBrubbPRg=
|
||||
github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQmPyzfmi0=
|
||||
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
|
||||
golang.org/x/arch v0.4.0 h1:A8WCeEWhLwPBKNbFi5Wv5UTCBx5zzubnXDlMOFAzFMc=
|
||||
golang.org/x/arch v0.4.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
|
||||
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA=
|
||||
golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
|
||||
golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk=
|
||||
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20211029224645-99673261e6eb/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14=
|
||||
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
|
||||
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
|
||||
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
|
||||
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
|
||||
@@ -149,26 +221,39 @@ golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5h
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
|
||||
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
|
||||
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc=
|
||||
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
|
||||
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
|
||||
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
|
||||
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
|
||||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gorm.io/gorm v1.25.2 h1:gs1o6Vsa+oVKG/a9ElL3XgyGfghFfkKA2SInQaCyMho=
|
||||
gorm.io/gorm v1.25.2/go.mod h1:L4uxeKpfBml98NYqVqwAdmV1a2nBtAec/cf3fpucW/k=
|
||||
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=
|
||||
|
||||
@@ -2,6 +2,7 @@ package config

import (
"encoding/json"
"fmt"
"os"
"path/filepath"

@@ -15,6 +16,13 @@ type Config struct {
DBConfig *database.Config `json:"db_config"`
}

func (c *Config) validate() error {
if maxChunkPerBatch := c.L2Config.BatchProposerConfig.MaxChunkNumPerBatch; maxChunkPerBatch <= 0 {
return fmt.Errorf("Invalid max_chunk_num_per_batch configuration: %v", maxChunkPerBatch)
}
return nil
}

// NewConfig returns a new instance of Config.
func NewConfig(file string) (*Config, error) {
buf, err := os.ReadFile(filepath.Clean(file))
@@ -28,5 +36,8 @@ func NewConfig(file string) (*Config, error) {
return nil, err
}

if err := cfg.validate(); err != nil {
return nil, err
}
return cfg, nil
}

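The validate hook added above means a config file whose max_chunk_num_per_batch is zero or missing is now rejected when the bridge loads its configuration instead of misbehaving later in the batch proposer. A minimal, hedged sketch of how a caller surfaces that error (the path is illustrative):

cfg, err := config.NewConfig("conf/config.json") // illustrative path
if err != nil {
    // e.g. "Invalid max_chunk_num_per_batch configuration: 0"
    return err
}
_ = cfg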
@@ -15,10 +15,6 @@ func TestConfig(t *testing.T) {
cfg, err := NewConfig("../../conf/config.json")
assert.NoError(t, err)

assert.Len(t, cfg.L1Config.RelayerConfig.MessageSenderPrivateKeys, 1)
assert.Len(t, cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys, 1)
assert.Len(t, cfg.L2Config.RelayerConfig.RollupSenderPrivateKeys, 1)

data, err := json.Marshal(cfg)
assert.NoError(t, err)

@@ -13,8 +13,6 @@ type L1Config struct {
Endpoint string `json:"endpoint"`
// The start height to sync event from layer 1
StartHeight uint64 `json:"start_height"`
// The L1ScrollMessenger contract address deployed on layer 1 chain.
L1MessengerAddress common.Address `json:"l1_messenger_address"`
// The L1MessageQueue contract address deployed on layer 1 chain.
L1MessageQueueAddress common.Address `json:"l1_message_queue_address"`
// The ScrollChain contract address deployed on layer 1 chain.

@@ -28,11 +28,9 @@ type L2Config struct {

// ChunkProposerConfig loads chunk_proposer configuration items.
type ChunkProposerConfig struct {
MaxTxGasPerChunk uint64 `json:"max_tx_gas_per_chunk"`
MaxL2TxNumPerChunk uint64 `json:"max_l2_tx_num_per_chunk"`
MaxTxNumPerChunk uint64 `json:"max_tx_num_per_chunk"`
MaxL1CommitGasPerChunk uint64 `json:"max_l1_commit_gas_per_chunk"`
MaxL1CommitCalldataSizePerChunk uint64 `json:"max_l1_commit_calldata_size_per_chunk"`
MinL1CommitCalldataSizePerChunk uint64 `json:"min_l1_commit_calldata_size_per_chunk"`
ChunkTimeoutSec uint64 `json:"chunk_timeout_sec"`
MaxRowConsumptionPerChunk uint64 `json:"max_row_consumption_per_chunk"`
GasCostIncreaseMultiplier float64 `json:"gas_cost_increase_multiplier"`
@@ -43,7 +41,6 @@ type BatchProposerConfig struct {
MaxChunkNumPerBatch uint64 `json:"max_chunk_num_per_batch"`
MaxL1CommitGasPerBatch uint64 `json:"max_l1_commit_gas_per_batch"`
MaxL1CommitCalldataSizePerBatch uint32 `json:"max_l1_commit_calldata_size_per_batch"`
MinChunkNumPerBatch uint64 `json:"min_chunk_num_per_batch"`
BatchTimeoutSec uint64 `json:"batch_timeout_sec"`
GasCostIncreaseMultiplier float64 `json:"gas_cost_increase_multiplier"`
}

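GasCostIncreaseMultiplier appears in both proposer configs and, per the relayer config comment further down, is a multiplier used when estimating gas limits. A minimal sketch of the arithmetic, assuming it is applied as a plain safety factor on top of an estimate (the helper name is hypothetical, not part of the diff):

func padGasEstimate(estimated uint64, multiplier float64) uint64 {
    // with the sample value 1.2: 1,000,000 estimated gas -> 1,200,000 budgeted gas
    return uint64(float64(estimated) * multiplier)
}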
@@ -34,17 +34,21 @@ type SenderConfig struct {
// The interval (in seconds) to check balance and top up sender's accounts
CheckBalanceTime uint64 `json:"check_balance_time"`
// The sender's pending count limit.
PendingLimit int `json:"pending_limit,omitempty"`
PendingLimit int `json:"pending_limit"`
}

// ChainMonitor this config is used to get batch status from chain_monitor API.
type ChainMonitor struct {
TimeOut int `json:"timeout"`
TryTimes int `json:"try_times"`
BaseURL string `json:"base_url"`
}

// RelayerConfig loads relayer configuration items.
// What we need to pay attention to is that
// `MessageSenderPrivateKeys` and `RollupSenderPrivateKeys` cannot have common private keys.
type RelayerConfig struct {
// RollupContractAddress store the rollup contract address.
RollupContractAddress common.Address `json:"rollup_contract_address,omitempty"`
// MessengerContractAddress store the scroll messenger contract address.
MessengerContractAddress common.Address `json:"messenger_contract_address"`
// GasPriceOracleContractAddress store the scroll messenger contract address.
GasPriceOracleContractAddress common.Address `json:"gas_price_oracle_contract_address"`
// sender config
@@ -55,10 +59,14 @@ type RelayerConfig struct {
FinalizeBatchIntervalSec uint64 `json:"finalize_batch_interval_sec"`
// MessageRelayMinGasLimit to avoid OutOfGas error
MessageRelayMinGasLimit uint64 `json:"message_relay_min_gas_limit,omitempty"`
// ChainMonitor config of monitoring service
ChainMonitor *ChainMonitor `json:"chain_monitor,omitempty"`
// GasCostIncreaseMultiplier multiplier for min gas limit estimation
GasCostIncreaseMultiplier float64 `json:"gas_cost_increase_multiplier,omitempty"`
// The private key of the relayer
MessageSenderPrivateKeys []*ecdsa.PrivateKey `json:"-"`
GasOracleSenderPrivateKeys []*ecdsa.PrivateKey `json:"-"`
RollupSenderPrivateKeys []*ecdsa.PrivateKey `json:"-"`
GasOracleSenderPrivateKey *ecdsa.PrivateKey `json:"-"`
CommitSenderPrivateKey *ecdsa.PrivateKey `json:"-"`
FinalizeSenderPrivateKey *ecdsa.PrivateKey `json:"-"`
}

// GasOracleConfig The config for updating gas price oracle.
@@ -72,46 +80,55 @@ type GasOracleConfig struct {
// relayerConfigAlias RelayerConfig alias name
type relayerConfigAlias RelayerConfig

func convertAndCheck(key string, uniqueAddressesSet map[string]struct{}) (*ecdsa.PrivateKey, error) {
if key == "" {
return nil, nil
}

privKey, err := crypto.ToECDSA(common.FromHex(key))
if err != nil {
return nil, err
}

addr := crypto.PubkeyToAddress(privKey.PublicKey).Hex()
if _, exists := uniqueAddressesSet[addr]; exists {
return nil, fmt.Errorf("detected duplicated address for private key: %s", addr)
}
uniqueAddressesSet[addr] = struct{}{}

return privKey, nil
}

// UnmarshalJSON unmarshal relayer_config struct.
func (r *RelayerConfig) UnmarshalJSON(input []byte) error {
var jsonConfig struct {
var privateKeysConfig struct {
relayerConfigAlias
// The private key of the relayer
MessageSenderPrivateKeys []string `json:"message_sender_private_keys"`
GasOracleSenderPrivateKeys []string `json:"gas_oracle_sender_private_keys"`
RollupSenderPrivateKeys []string `json:"rollup_sender_private_keys,omitempty"`
GasOracleSenderPrivateKey string `json:"gas_oracle_sender_private_key"`
CommitSenderPrivateKey string `json:"commit_sender_private_key"`
FinalizeSenderPrivateKey string `json:"finalize_sender_private_key"`
}
if err := json.Unmarshal(input, &jsonConfig); err != nil {
return err
var err error
if err = json.Unmarshal(input, &privateKeysConfig); err != nil {
return fmt.Errorf("failed to unmarshal private keys config: %w", err)
}

*r = RelayerConfig(jsonConfig.relayerConfigAlias)
*r = RelayerConfig(privateKeysConfig.relayerConfigAlias)

// Get messenger private key list.
for _, privStr := range jsonConfig.MessageSenderPrivateKeys {
priv, err := crypto.ToECDSA(common.FromHex(privStr))
if err != nil {
return fmt.Errorf("incorrect private_key_list format, err: %v", err)
}
r.MessageSenderPrivateKeys = append(r.MessageSenderPrivateKeys, priv)
uniqueAddressesSet := make(map[string]struct{})

r.GasOracleSenderPrivateKey, err = convertAndCheck(privateKeysConfig.GasOracleSenderPrivateKey, uniqueAddressesSet)
if err != nil {
return fmt.Errorf("error converting and checking gas oracle sender private key: %w", err)
}

// Get gas oracle private key list.
for _, privStr := range jsonConfig.GasOracleSenderPrivateKeys {
priv, err := crypto.ToECDSA(common.FromHex(privStr))
if err != nil {
return fmt.Errorf("incorrect private_key_list format, err: %v", err)
}
r.GasOracleSenderPrivateKeys = append(r.GasOracleSenderPrivateKeys, priv)
r.CommitSenderPrivateKey, err = convertAndCheck(privateKeysConfig.CommitSenderPrivateKey, uniqueAddressesSet)
if err != nil {
return fmt.Errorf("error converting and checking commit sender private key: %w", err)
}

// Get rollup private key
for _, privStr := range jsonConfig.RollupSenderPrivateKeys {
priv, err := crypto.ToECDSA(common.FromHex(privStr))
if err != nil {
return fmt.Errorf("incorrect prover_private_key format, err: %v", err)
}
r.RollupSenderPrivateKeys = append(r.RollupSenderPrivateKeys, priv)
r.FinalizeSenderPrivateKey, err = convertAndCheck(privateKeysConfig.FinalizeSenderPrivateKey, uniqueAddressesSet)
if err != nil {
return fmt.Errorf("error converting and checking finalize sender private key: %w", err)
}

return nil
@@ -119,28 +136,18 @@ func (r *RelayerConfig) UnmarshalJSON(input []byte) error {

// MarshalJSON marshal RelayerConfig config, transfer private keys.
func (r *RelayerConfig) MarshalJSON() ([]byte, error) {
jsonConfig := struct {
privateKeysConfig := struct {
relayerConfigAlias
// The private key of the relayer
MessageSenderPrivateKeys []string `json:"message_sender_private_keys"`
GasOracleSenderPrivateKeys []string `json:"gas_oracle_sender_private_keys,omitempty"`
RollupSenderPrivateKeys []string `json:"rollup_sender_private_keys,omitempty"`
}{relayerConfigAlias(*r), nil, nil, nil}
GasOracleSenderPrivateKey string `json:"gas_oracle_sender_private_key"`
CommitSenderPrivateKey string `json:"commit_sender_private_key"`
FinalizeSenderPrivateKey string `json:"finalize_sender_private_key"`
}{}

// Transfer message sender private keys to hex type.
for _, priv := range r.MessageSenderPrivateKeys {
jsonConfig.MessageSenderPrivateKeys = append(jsonConfig.MessageSenderPrivateKeys, common.Bytes2Hex(crypto.FromECDSA(priv)))
}
privateKeysConfig.relayerConfigAlias = relayerConfigAlias(*r)
privateKeysConfig.GasOracleSenderPrivateKey = common.Bytes2Hex(crypto.FromECDSA(r.GasOracleSenderPrivateKey))
privateKeysConfig.CommitSenderPrivateKey = common.Bytes2Hex(crypto.FromECDSA(r.CommitSenderPrivateKey))
privateKeysConfig.FinalizeSenderPrivateKey = common.Bytes2Hex(crypto.FromECDSA(r.FinalizeSenderPrivateKey))

// Transfer rollup sender private keys to hex type.
for _, priv := range r.GasOracleSenderPrivateKeys {
jsonConfig.GasOracleSenderPrivateKeys = append(jsonConfig.GasOracleSenderPrivateKeys, common.Bytes2Hex(crypto.FromECDSA(priv)))
}

// Transfer rollup sender private keys to hex type.
for _, priv := range r.RollupSenderPrivateKeys {
jsonConfig.RollupSenderPrivateKeys = append(jsonConfig.RollupSenderPrivateKeys, common.Bytes2Hex(crypto.FromECDSA(priv)))
}

return json.Marshal(&jsonConfig)
return json.Marshal(&privateKeysConfig)
}

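The rewritten UnmarshalJSON threads one uniqueAddressesSet through convertAndCheck for the gas-oracle, commit and finalize keys, so reusing the same private key for two senders now fails at config-load time rather than at runtime. A minimal sketch in the spirit of the package's unit tests (key values are illustrative 32-byte hex scalars, not real keys):

raw := []byte(`{
  "gas_oracle_sender_private_key": "1212121212121212121212121212121212121212121212121212121212121212",
  "commit_sender_private_key": "3434343434343434343434343434343434343434343434343434343434343434",
  "finalize_sender_private_key": "3434343434343434343434343434343434343434343434343434343434343434"
}`)
var relayerCfg config.RelayerConfig
err := json.Unmarshal(raw, &relayerCfg)
// err is non-nil here, wrapped as:
// "error converting and checking finalize sender private key: detected duplicated address for private key: 0x..."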
@@ -6,10 +6,6 @@ const (
gasPriceDiffPrecision = 1000000

defaultGasPriceDiff = 50000 // 5%

defaultL1MessageRelayMinGasLimit = 130000 // should be enough for both ERC20 and ETH relay

defaultL2MessageRelayMinGasLimit = 200000
)

var (

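With gasPriceDiffPrecision at 1,000,000, the default diff of 50,000 works out to 50000 / 1000000 = 5%, matching the inline comment; a configured diff of 100,000 would, by the same arithmetic, presumably mean the gas oracle only relays a new base fee once it has moved by 10%.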
@@ -3,17 +3,15 @@ package relayer
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
// not sure if this will make problems when relay with l1geth
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/metrics"
|
||||
"scroll-tech/common/types"
|
||||
|
||||
bridgeAbi "scroll-tech/bridge/abi"
|
||||
@@ -22,11 +20,6 @@ import (
|
||||
"scroll-tech/bridge/internal/orm"
|
||||
)
|
||||
|
||||
var (
|
||||
bridgeL1MsgsRelayedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/relayed/total", metrics.ScrollRegistry)
|
||||
bridgeL1MsgsRelayedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/relayed/confirmed/total", metrics.ScrollRegistry)
|
||||
)
|
||||
|
||||
// Layer1Relayer is responsible for
|
||||
// 1. fetch pending L1Message from db
|
||||
// 2. relay pending message to layer 2 node
|
||||
@@ -38,38 +31,23 @@ type Layer1Relayer struct {
|
||||
|
||||
cfg *config.RelayerConfig
|
||||
|
||||
// channel used to communicate with transaction sender
|
||||
messageSender *sender.Sender
|
||||
l2MessengerABI *abi.ABI
|
||||
|
||||
gasOracleSender *sender.Sender
|
||||
l1GasOracleABI *abi.ABI
|
||||
|
||||
minGasLimitForMessageRelay uint64
|
||||
|
||||
lastGasPrice uint64
|
||||
minGasPrice uint64
|
||||
gasPriceDiff uint64
|
||||
|
||||
l1MessageOrm *orm.L1Message
|
||||
l1BlockOrm *orm.L1Block
|
||||
l1BlockOrm *orm.L1Block
|
||||
metrics *l1RelayerMetrics
|
||||
}
|
||||
|
||||
// NewLayer1Relayer will return a new instance of Layer1RelayerClient
|
||||
func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfig) (*Layer1Relayer, error) {
|
||||
messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKeys)
|
||||
func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfig, reg prometheus.Registerer) (*Layer1Relayer, error) {
|
||||
gasOracleSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderPrivateKey, "l1_relayer", "gas_oracle_sender", reg)
|
||||
if err != nil {
|
||||
addr := crypto.PubkeyToAddress(cfg.MessageSenderPrivateKeys[0].PublicKey)
|
||||
log.Error("new MessageSender failed", "main address", addr.String(), "err", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// @todo make sure only one sender is available
|
||||
gasOracleSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderPrivateKeys)
|
||||
if err != nil {
|
||||
addr := crypto.PubkeyToAddress(cfg.GasOracleSenderPrivateKeys[0].PublicKey)
|
||||
log.Error("new GasOracleSender failed", "main address", addr.String(), "err", err)
|
||||
return nil, err
|
||||
addr := crypto.PubkeyToAddress(cfg.GasOracleSenderPrivateKey.PublicKey)
|
||||
return nil, fmt.Errorf("new gas oracle sender failed for address %s, err: %v", addr.Hex(), err)
|
||||
}
|
||||
|
||||
var minGasPrice uint64
|
||||
@@ -82,83 +60,27 @@ func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfi
|
||||
gasPriceDiff = defaultGasPriceDiff
|
||||
}
|
||||
|
||||
minGasLimitForMessageRelay := uint64(defaultL1MessageRelayMinGasLimit)
|
||||
if cfg.MessageRelayMinGasLimit != 0 {
|
||||
minGasLimitForMessageRelay = cfg.MessageRelayMinGasLimit
|
||||
}
|
||||
|
||||
l1Relayer := &Layer1Relayer{
|
||||
ctx: ctx,
|
||||
l1MessageOrm: orm.NewL1Message(db),
|
||||
l1BlockOrm: orm.NewL1Block(db),
|
||||
|
||||
messageSender: messageSender,
|
||||
l2MessengerABI: bridgeAbi.L2ScrollMessengerABI,
|
||||
cfg: cfg,
|
||||
ctx: ctx,
|
||||
l1BlockOrm: orm.NewL1Block(db),
|
||||
|
||||
gasOracleSender: gasOracleSender,
|
||||
l1GasOracleABI: bridgeAbi.L1GasPriceOracleABI,
|
||||
|
||||
minGasLimitForMessageRelay: minGasLimitForMessageRelay,
|
||||
|
||||
minGasPrice: minGasPrice,
|
||||
gasPriceDiff: gasPriceDiff,
|
||||
|
||||
cfg: cfg,
|
||||
}
|
||||
|
||||
l1Relayer.metrics = initL1RelayerMetrics(reg)
|
||||
|
||||
go l1Relayer.handleConfirmLoop(ctx)
|
||||
return l1Relayer, nil
|
||||
}
|
||||
|
||||
// ProcessSavedEvents relays saved un-processed cross-domain transactions to desired blockchain
|
||||
func (r *Layer1Relayer) ProcessSavedEvents() {
|
||||
// msgs are sorted by nonce in increasing order
|
||||
msgs, err := r.l1MessageOrm.GetL1MessagesByStatus(types.MsgPending, 100)
|
||||
if err != nil {
|
||||
log.Error("Failed to fetch unprocessed L1 messages", "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
if len(msgs) > 0 {
|
||||
log.Info("Processing L1 messages", "count", len(msgs))
|
||||
}
|
||||
|
||||
for _, msg := range msgs {
|
||||
tmpMsg := msg
|
||||
if err = r.processSavedEvent(&tmpMsg); err != nil {
|
||||
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
|
||||
log.Error("failed to process event", "msg.msgHash", msg.MsgHash, "err", err)
|
||||
}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (r *Layer1Relayer) processSavedEvent(msg *orm.L1Message) error {
|
||||
calldata := common.Hex2Bytes(msg.Calldata)
|
||||
hash, err := r.messageSender.SendTransaction(msg.MsgHash, &r.cfg.MessengerContractAddress, big.NewInt(0), calldata, r.minGasLimitForMessageRelay)
|
||||
if err != nil && errors.Is(err, ErrExecutionRevertedMessageExpired) {
|
||||
return r.l1MessageOrm.UpdateLayer1Status(r.ctx, msg.MsgHash, types.MsgExpired)
|
||||
}
|
||||
|
||||
if err != nil && errors.Is(err, ErrExecutionRevertedAlreadySuccessExecuted) {
|
||||
return r.l1MessageOrm.UpdateLayer1Status(r.ctx, msg.MsgHash, types.MsgConfirmed)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
bridgeL1MsgsRelayedTotalCounter.Inc(1)
|
||||
log.Info("relayMessage to layer2", "msg hash", msg.MsgHash, "tx hash", hash)
|
||||
|
||||
err = r.l1MessageOrm.UpdateLayer1StatusAndLayer2Hash(r.ctx, msg.MsgHash, types.MsgSubmitted, hash.String())
|
||||
if err != nil {
|
||||
log.Error("UpdateLayer1StatusAndLayer2Hash failed", "msg.msgHash", msg.MsgHash, "msg.height", msg.Height, "err", err)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// ProcessGasPriceOracle imports gas price to layer2
|
||||
func (r *Layer1Relayer) ProcessGasPriceOracle() {
|
||||
r.metrics.bridgeL1RelayerGasPriceOraclerRunTotal.Inc()
|
||||
latestBlockHeight, err := r.l1BlockOrm.GetLatestL1BlockHeight(r.ctx)
|
||||
if err != nil {
|
||||
log.Warn("Failed to fetch latest L1 block height from db", "err", err)
|
||||
@@ -203,6 +125,7 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
|
||||
return
|
||||
}
|
||||
r.lastGasPrice = block.BaseFee
|
||||
r.metrics.bridgeL1RelayerLastGasPrice.Set(float64(r.lastGasPrice))
|
||||
log.Info("Update l1 base fee", "txHash", hash.String(), "baseFee", baseFee)
|
||||
}
|
||||
}
|
||||
@@ -213,23 +136,8 @@ func (r *Layer1Relayer) handleConfirmLoop(ctx context.Context) {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case cfm := <-r.messageSender.ConfirmChan():
|
||||
bridgeL1MsgsRelayedConfirmedTotalCounter.Inc(1)
|
||||
if !cfm.IsSuccessful {
|
||||
err := r.l1MessageOrm.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, types.MsgRelayFailed, cfm.TxHash.String())
|
||||
if err != nil {
|
||||
log.Warn("UpdateLayer1StatusAndLayer2Hash failed", "err", err)
|
||||
}
|
||||
log.Warn("transaction confirmed but failed in layer2", "confirmation", cfm)
|
||||
} else {
|
||||
// @todo handle db error
|
||||
err := r.l1MessageOrm.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, types.MsgConfirmed, cfm.TxHash.String())
|
||||
if err != nil {
|
||||
log.Warn("UpdateLayer1StatusAndLayer2Hash failed", "err", err)
|
||||
}
|
||||
log.Info("transaction confirmed in layer2", "confirmation", cfm)
|
||||
}
|
||||
case cfm := <-r.gasOracleSender.ConfirmChan():
|
||||
r.metrics.bridgeL1GasOraclerConfirmedTotal.Inc()
|
||||
if !cfm.IsSuccessful {
|
||||
// @discuss: maybe make it pending again?
|
||||
err := r.l1BlockOrm.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
|
||||
|
||||
54 bridge/internal/controller/relayer/l1_relayer_metrics.go Normal file
@@ -0,0 +1,54 @@
package relayer

import (
"sync"

"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)

type l1RelayerMetrics struct {
bridgeL1RelayedMsgsTotal prometheus.Counter
bridgeL1RelayedMsgsFailureTotal prometheus.Counter
bridgeL1RelayerGasPriceOraclerRunTotal prometheus.Counter
bridgeL1RelayerLastGasPrice prometheus.Gauge
bridgeL1MsgsRelayedConfirmedTotal prometheus.Counter
bridgeL1GasOraclerConfirmedTotal prometheus.Counter
}

var (
initL1RelayerMetricOnce sync.Once
l1RelayerMetric *l1RelayerMetrics
)

func initL1RelayerMetrics(reg prometheus.Registerer) *l1RelayerMetrics {
initL1RelayerMetricOnce.Do(func() {
l1RelayerMetric = &l1RelayerMetrics{
bridgeL1RelayedMsgsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer1_msg_relayed_total",
Help: "The total number of the l1 relayed message.",
}),
bridgeL1RelayedMsgsFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer1_msg_relayed_failure_total",
Help: "The total number of the l1 relayed failure message.",
}),
bridgeL1MsgsRelayedConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer1_relayed_confirmed_total",
Help: "The total number of layer1 relayed confirmed",
}),
bridgeL1RelayerGasPriceOraclerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer1_gas_price_oracler_total",
Help: "The total number of layer1 gas price oracler run total",
}),
bridgeL1RelayerLastGasPrice: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_layer1_gas_price_latest_gas_price",
Help: "The latest gas price of bridge relayer l1",
}),
bridgeL1GasOraclerConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer1_gas_oracler_confirmed_total",
Help: "The total number of layer1 relayed confirmed",
}),
}
})
return l1RelayerMetric
}

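A note on the new metrics file: initL1RelayerMetrics is guarded by a sync.Once, so every Layer1Relayer constructed in the same process shares one set of collectors and the Registerer only sees them once; promauto.With also accepts a nil Registerer (as the relayer tests below pass) and then constructs the metrics without registering them. A minimal usage sketch, not taken from the diff:

reg := prometheus.NewRegistry()
m := initL1RelayerMetrics(reg)
m.bridgeL1RelayerLastGasPrice.Set(42) // e.g. record the latest observed L1 base fee
again := initL1RelayerMetrics(reg)    // returns the same instance, no double registration
_ = again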
@@ -22,33 +22,6 @@ import (
|
||||
"scroll-tech/bridge/internal/orm"
|
||||
)
|
||||
|
||||
var (
|
||||
templateL1Message = []*orm.L1Message{
|
||||
{
|
||||
QueueIndex: 1,
|
||||
MsgHash: "msg_hash1",
|
||||
Height: 1,
|
||||
Sender: "0x596a746661dbed76a84556111c2872249b070e15",
|
||||
Value: "0x19ece",
|
||||
GasLimit: 11529940,
|
||||
Target: "0x2c73620b223808297ea734d946813f0dd78eb8f7",
|
||||
Calldata: "testdata",
|
||||
Layer1Hash: "hash0",
|
||||
},
|
||||
{
|
||||
QueueIndex: 2,
|
||||
MsgHash: "msg_hash2",
|
||||
Height: 2,
|
||||
Sender: "0x596a746661dbed76a84556111c2872249b070e15",
|
||||
Value: "0x19ece",
|
||||
GasLimit: 11529940,
|
||||
Target: "0x2c73620b223808297ea734d946813f0dd78eb8f7",
|
||||
Calldata: "testdata",
|
||||
Layer1Hash: "hash1",
|
||||
},
|
||||
}
|
||||
)
|
||||
|
||||
func setupL1RelayerDB(t *testing.T) *gorm.DB {
|
||||
db, err := database.InitDB(cfg.DBConfig)
|
||||
assert.NoError(t, err)
|
||||
@@ -62,66 +35,11 @@ func setupL1RelayerDB(t *testing.T) *gorm.DB {
|
||||
func testCreateNewL1Relayer(t *testing.T) {
|
||||
db := setupL1RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
relayer, err := NewLayer1Relayer(context.Background(), db, cfg.L2Config.RelayerConfig)
|
||||
relayer, err := NewLayer1Relayer(context.Background(), db, cfg.L2Config.RelayerConfig, nil)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, relayer)
|
||||
}
|
||||
|
||||
func testL1RelayerProcessSaveEvents(t *testing.T) {
|
||||
db := setupL1RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
l1MessageOrm := orm.NewL1Message(db)
|
||||
l1Cfg := cfg.L1Config
|
||||
relayer, err := NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, relayer)
|
||||
assert.NoError(t, l1MessageOrm.SaveL1Messages(context.Background(), templateL1Message))
|
||||
relayer.ProcessSavedEvents()
|
||||
msg1, err := l1MessageOrm.GetL1MessageByQueueIndex(1)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, types.MsgStatus(msg1.Status), types.MsgSubmitted)
|
||||
msg2, err := l1MessageOrm.GetL1MessageByQueueIndex(2)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, types.MsgStatus(msg2.Status), types.MsgSubmitted)
|
||||
}
|
||||
|
||||
func testL1RelayerMsgConfirm(t *testing.T) {
|
||||
db := setupL1RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
l1MessageOrm := orm.NewL1Message(db)
|
||||
l1Messages := []*orm.L1Message{
|
||||
{MsgHash: "msg-1", QueueIndex: 0},
|
||||
{MsgHash: "msg-2", QueueIndex: 1},
|
||||
}
|
||||
err := l1MessageOrm.SaveL1Messages(context.Background(), l1Messages)
|
||||
assert.NoError(t, err)
|
||||
// Create and set up the Layer1 Relayer.
|
||||
l1Cfg := cfg.L1Config
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Simulate message confirmations.
|
||||
l1Relayer.messageSender.SendConfirmation(&sender.Confirmation{
|
||||
ID: "msg-1",
|
||||
IsSuccessful: true,
|
||||
})
|
||||
l1Relayer.messageSender.SendConfirmation(&sender.Confirmation{
|
||||
ID: "msg-2",
|
||||
IsSuccessful: false,
|
||||
})
|
||||
|
||||
// Check the database for the updated status using TryTimes.
|
||||
ok := utils.TryTimes(5, func() bool {
|
||||
msg1, err1 := l1MessageOrm.GetL1MessageByMsgHash("msg-1")
|
||||
msg2, err2 := l1MessageOrm.GetL1MessageByMsgHash("msg-2")
|
||||
return err1 == nil && types.MsgStatus(msg1.Status) == types.MsgConfirmed &&
|
||||
err2 == nil && types.MsgStatus(msg2.Status) == types.MsgRelayFailed
|
||||
})
|
||||
assert.True(t, ok)
|
||||
}
|
||||
|
||||
func testL1RelayerGasOracleConfirm(t *testing.T) {
|
||||
db := setupL1RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
@@ -138,7 +56,7 @@ func testL1RelayerGasOracleConfirm(t *testing.T) {
|
||||
l1Cfg := cfg.L1Config
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig)
|
||||
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Simulate message confirmations.
|
||||
@@ -168,7 +86,7 @@ func testL1RelayerProcessGasPriceOracle(t *testing.T) {
|
||||
l1Cfg := cfg.L1Config
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig)
|
||||
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig, nil)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, l1Relayer)
|
||||
|
||||
|
||||
@@ -8,15 +8,16 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/go-resty/resty/v2"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/metrics"
|
||||
"scroll-tech/common/types"
|
||||
|
||||
bridgeAbi "scroll-tech/bridge/abi"
|
||||
@@ -25,13 +26,6 @@ import (
|
||||
"scroll-tech/bridge/internal/orm"
|
||||
)
|
||||
|
||||
var (
|
||||
bridgeL2BatchesFinalizedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/finalized/total", metrics.ScrollRegistry)
|
||||
bridgeL2BatchesCommittedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/committed/total", metrics.ScrollRegistry)
|
||||
bridgeL2BatchesFinalizedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/finalized/confirmed/total", metrics.ScrollRegistry)
|
||||
bridgeL2BatchesCommittedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/committed/confirmed/total", metrics.ScrollRegistry)
|
||||
)
|
||||
|
||||
// Layer2Relayer is responsible for
|
||||
// 1. Committing and finalizing L2 blocks on L1
|
||||
// 2. Relaying messages from L2 to L1
|
||||
@@ -50,21 +44,20 @@ type Layer2Relayer struct {
|
||||
|
||||
cfg *config.RelayerConfig
|
||||
|
||||
messageSender *sender.Sender
|
||||
l1MessengerABI *abi.ABI
|
||||
|
||||
rollupSender *sender.Sender
|
||||
l1RollupABI *abi.ABI
|
||||
commitSender *sender.Sender
|
||||
finalizeSender *sender.Sender
|
||||
l1RollupABI *abi.ABI
|
||||
|
||||
gasOracleSender *sender.Sender
|
||||
l2GasOracleABI *abi.ABI
|
||||
|
||||
minGasLimitForMessageRelay uint64
|
||||
|
||||
lastGasPrice uint64
|
||||
minGasPrice uint64
|
||||
gasPriceDiff uint64
|
||||
|
||||
// Used to get batch status from chain_monitor api.
|
||||
chainMonitorClient *resty.Client
|
||||
|
||||
// A list of processing message.
|
||||
// key(string): confirmation ID, value(string): layer2 hash.
|
||||
processingMessage sync.Map
|
||||
@@ -76,27 +69,27 @@ type Layer2Relayer struct {
|
||||
// A list of processing batch finalization.
|
||||
// key(string): confirmation ID, value(string): batch hash.
|
||||
processingFinalization sync.Map
|
||||
|
||||
metrics *l2RelayerMetrics
|
||||
}
|
||||
|
||||
// NewLayer2Relayer will return a new instance of Layer2RelayerClient
|
||||
func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.DB, cfg *config.RelayerConfig, initGenesis bool) (*Layer2Relayer, error) {
|
||||
// @todo use different sender for relayer, block commit and proof finalize
|
||||
messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKeys)
|
||||
func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.DB, cfg *config.RelayerConfig, initGenesis bool, reg prometheus.Registerer) (*Layer2Relayer, error) {
|
||||
commitSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.CommitSenderPrivateKey, "l2_relayer", "commit_sender", reg)
|
||||
if err != nil {
|
||||
log.Error("Failed to create messenger sender", "err", err)
|
||||
return nil, err
|
||||
addr := crypto.PubkeyToAddress(cfg.CommitSenderPrivateKey.PublicKey)
|
||||
return nil, fmt.Errorf("new commit sender failed for address %s, err: %w", addr.Hex(), err)
|
||||
}
|
||||
finalizeSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.FinalizeSenderPrivateKey, "l2_relayer", "finalize_sender", reg)
|
||||
if err != nil {
|
||||
addr := crypto.PubkeyToAddress(cfg.FinalizeSenderPrivateKey.PublicKey)
|
||||
return nil, fmt.Errorf("new finalize sender failed for address %s, err: %w", addr.Hex(), err)
|
||||
}
|
||||
|
||||
rollupSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.RollupSenderPrivateKeys)
|
||||
gasOracleSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderPrivateKey, "l2_relayer", "gas_oracle_sender", reg)
|
||||
if err != nil {
|
||||
log.Error("Failed to create rollup sender", "err", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
gasOracleSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderPrivateKeys)
|
||||
if err != nil {
|
||||
log.Error("Failed to create gas oracle sender", "err", err)
|
||||
return nil, err
|
||||
addr := crypto.PubkeyToAddress(cfg.GasOracleSenderPrivateKey.PublicKey)
|
||||
return nil, fmt.Errorf("new gas oracle sender failed for address %s, err: %w", addr.Hex(), err)
|
||||
}
|
||||
|
||||
var minGasPrice uint64
|
||||
@@ -109,10 +102,10 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
|
||||
gasPriceDiff = defaultGasPriceDiff
|
||||
}
|
||||
|
||||
minGasLimitForMessageRelay := uint64(defaultL2MessageRelayMinGasLimit)
|
||||
if cfg.MessageRelayMinGasLimit != 0 {
|
||||
minGasLimitForMessageRelay = cfg.MessageRelayMinGasLimit
|
||||
}
|
||||
// chain_monitor client
|
||||
chainMonitorClient := resty.New()
|
||||
chainMonitorClient.SetRetryCount(cfg.ChainMonitor.TryTimes)
|
||||
chainMonitorClient.SetTimeout(time.Duration(cfg.ChainMonitor.TimeOut) * time.Second)
|
||||
|
||||
layer2Relayer := &Layer2Relayer{
|
||||
ctx: ctx,
|
||||
@@ -124,17 +117,13 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
|
||||
|
||||
l2Client: l2Client,
|
||||
|
||||
messageSender: messageSender,
|
||||
l1MessengerABI: bridgeAbi.L1ScrollMessengerABI,
|
||||
|
||||
rollupSender: rollupSender,
|
||||
l1RollupABI: bridgeAbi.ScrollChainABI,
|
||||
commitSender: commitSender,
|
||||
finalizeSender: finalizeSender,
|
||||
l1RollupABI: bridgeAbi.ScrollChainABI,
|
||||
|
||||
gasOracleSender: gasOracleSender,
|
||||
l2GasOracleABI: bridgeAbi.L2GasPriceOracleABI,
|
||||
|
||||
minGasLimitForMessageRelay: minGasLimitForMessageRelay,
|
||||
|
||||
minGasPrice: minGasPrice,
|
||||
gasPriceDiff: gasPriceDiff,
|
||||
|
||||
@@ -142,6 +131,7 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
|
||||
processingMessage: sync.Map{},
|
||||
processingCommitment: sync.Map{},
|
||||
processingFinalization: sync.Map{},
|
||||
chainMonitorClient: chainMonitorClient,
|
||||
}
|
||||
|
||||
// Initialize genesis before we do anything else
|
||||
@@ -150,6 +140,7 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
|
||||
return nil, fmt.Errorf("failed to initialize and commit genesis batch, err: %v", err)
|
||||
}
|
||||
}
|
||||
layer2Relayer.metrics = initL2RelayerMetrics(reg)
|
||||
|
||||
go layer2Relayer.handleConfirmLoop(ctx)
|
||||
return layer2Relayer, nil
|
||||
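The constructor now takes a prometheus.Registerer plus dedicated commit, finalize, and gas-oracle keys instead of key pools. Below is a minimal wiring sketch, not code from this PR: the internal import paths and the config/client plumbing are assumptions inferred from the file layout in this diff, only the NewLayer2Relayer call shape matches the change above.

package example

import (
	"context"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/scroll-tech/go-ethereum/ethclient"
	"gorm.io/gorm"

	"scroll-tech/bridge/internal/config"
	"scroll-tech/bridge/internal/controller/relayer"
)

// newL2Relayer sketches the new constructor call shape: one registerer, one
// relayer config, and no per-purpose key pools on the caller side.
func newL2Relayer(ctx context.Context, cfg *config.Config, db *gorm.DB, l2Client *ethclient.Client) (*relayer.Layer2Relayer, error) {
	// A dedicated registry (or prometheus.DefaultRegisterer) collects the
	// relayer and sender metrics introduced in this change.
	reg := prometheus.NewRegistry()

	// initGenesis=false: do not import the genesis batch on startup.
	return relayer.NewLayer2Relayer(ctx, l2Client, db, cfg.L2Config.RelayerConfig, false, reg)
}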
@@ -190,8 +181,14 @@ func (r *Layer2Relayer) initializeGenesis() error {
|
||||
return fmt.Errorf("failed to update genesis chunk proving status: %v", err)
|
||||
}
|
||||
|
||||
batchMeta := &types.BatchMeta{
|
||||
StartChunkIndex: 0,
|
||||
StartChunkHash: dbChunk.Hash,
|
||||
EndChunkIndex: 0,
|
||||
EndChunkHash: dbChunk.Hash,
|
||||
}
|
||||
var batch *orm.Batch
|
||||
batch, err = r.batchOrm.InsertBatch(r.ctx, 0, 0, dbChunk.Hash, dbChunk.Hash, []*types.Chunk{chunk}, dbTX)
|
||||
batch, err = r.batchOrm.InsertBatch(r.ctx, []*types.Chunk{chunk}, batchMeta, dbTX)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to insert batch: %v", err)
|
||||
}
|
||||
@@ -230,7 +227,7 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
|
||||
}
|
||||
|
||||
// submit genesis batch to L1 rollup contract
|
||||
txHash, err := r.rollupSender.SendTransaction(batchHash, &r.cfg.RollupContractAddress, big.NewInt(0), calldata, 0)
|
||||
txHash, err := r.commitSender.SendTransaction(batchHash, &r.cfg.RollupContractAddress, big.NewInt(0), calldata, 0)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to send import genesis batch tx to L1, error: %v", err)
|
||||
}
|
||||
@@ -245,14 +242,14 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
|
||||
select {
|
||||
// print progress
|
||||
case <-ticker.C:
|
||||
log.Info("Waiting for confirmation", "pending count", r.rollupSender.PendingCount())
|
||||
log.Info("Waiting for confirmation")
|
||||
|
||||
// timeout
|
||||
case <-time.After(5 * time.Minute):
|
||||
return fmt.Errorf("import genesis timeout after 5 minutes, original txHash: %v", txHash.String())
|
||||
|
||||
// handle confirmation
|
||||
case confirmation := <-r.rollupSender.ConfirmChan():
|
||||
case confirmation := <-r.commitSender.ConfirmChan():
|
||||
if confirmation.ID != batchHash {
|
||||
return fmt.Errorf("unexpected import genesis confirmation id, expected: %v, got: %v", batchHash, confirmation.ID)
|
||||
}
|
||||
@@ -267,6 +264,7 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
|
||||
|
||||
// ProcessGasPriceOracle imports gas price to layer1
|
||||
func (r *Layer2Relayer) ProcessGasPriceOracle() {
|
||||
r.metrics.bridgeL2RelayerGasPriceOraclerRunTotal.Inc()
|
||||
batch, err := r.batchOrm.GetLatestBatch(r.ctx)
|
||||
if batch == nil || err != nil {
|
||||
log.Error("Failed to GetLatestBatch", "batch", batch, "err", err)
|
||||
@@ -304,6 +302,7 @@ func (r *Layer2Relayer) ProcessGasPriceOracle() {
|
||||
return
|
||||
}
|
||||
r.lastGasPrice = suggestGasPriceUint64
|
||||
r.metrics.bridgeL2RelayerLastGasPrice.Set(float64(r.lastGasPrice))
|
||||
log.Info("Update l2 gas price", "txHash", hash.String(), "GasPrice", suggestGasPrice)
|
||||
}
|
||||
}
|
||||
@@ -312,12 +311,13 @@ func (r *Layer2Relayer) ProcessGasPriceOracle() {
|
||||
// ProcessPendingBatches processes the pending batches by sending commitBatch transactions to layer 1.
|
||||
func (r *Layer2Relayer) ProcessPendingBatches() {
|
||||
// get pending batches from database in ascending order by their index.
|
||||
pendingBatches, err := r.batchOrm.GetPendingBatches(r.ctx, 10)
|
||||
pendingBatches, err := r.batchOrm.GetPendingBatches(r.ctx, 5)
|
||||
if err != nil {
|
||||
log.Error("Failed to fetch pending L2 batches", "err", err)
|
||||
return
|
||||
}
|
||||
for _, batch := range pendingBatches {
|
||||
r.metrics.bridgeL2RelayerProcessPendingBatchTotal.Inc()
|
||||
// get current header and parent header.
|
||||
currentBatchHeader, err := types.DecodeBatchHeader(batch.BatchHeader)
|
||||
if err != nil {
|
||||
@@ -374,11 +374,24 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
|
||||
|
||||
// send transaction
|
||||
txID := batch.Hash + "-commit"
|
||||
txHash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), calldata, 0)
|
||||
minGasLimit := uint64(float64(batch.TotalL1CommitGas) * r.cfg.GasCostIncreaseMultiplier)
|
||||
txHash, err := r.commitSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), calldata, minGasLimit)
|
||||
if err != nil {
|
||||
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
|
||||
log.Error("Failed to send commitBatch tx to layer1 ", "err", err)
|
||||
}
|
||||
log.Error(
|
||||
"Failed to send commitBatch tx to layer1",
|
||||
"index", batch.Index,
|
||||
"hash", batch.Hash,
|
||||
"RollupContractAddress", r.cfg.RollupContractAddress,
|
||||
"err", err,
|
||||
)
|
||||
log.Debug(
|
||||
"Failed to send commitBatch tx to layer1",
|
||||
"index", batch.Index,
|
||||
"hash", batch.Hash,
|
||||
"RollupContractAddress", r.cfg.RollupContractAddress,
|
||||
"calldata", common.Bytes2Hex(calldata),
|
||||
"err", err,
|
||||
)
|
||||
return
|
||||
}
|
||||
|
||||
@@ -387,7 +400,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
|
||||
log.Error("UpdateCommitTxHashAndRollupStatus failed", "hash", batch.Hash, "index", batch.Index, "err", err)
|
||||
return
|
||||
}
|
||||
bridgeL2BatchesCommittedTotalCounter.Inc(1)
|
||||
r.metrics.bridgeL2RelayerProcessPendingBatchSuccessTotal.Inc()
|
||||
r.processingCommitment.Store(txID, batch.Hash)
|
||||
log.Info("Sent the commitBatch tx to layer1", "batch index", batch.Index, "batch hash", batch.Hash, "tx hash", txHash.Hex())
|
||||
}
|
||||
@@ -411,6 +424,8 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
|
||||
return
|
||||
}
|
||||
|
||||
r.metrics.bridgeL2RelayerProcessCommittedBatchesTotal.Inc()
|
||||
|
||||
batch := batches[0]
|
||||
hash := batch.Hash
|
||||
status := types.ProvingStatus(batch.ProvingStatus)
|
||||
@@ -418,12 +433,22 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
|
||||
case types.ProvingTaskUnassigned, types.ProvingTaskAssigned:
|
||||
// The proof for this block is not ready yet.
|
||||
return
|
||||
case types.ProvingTaskProved:
|
||||
// It's an intermediate state. The prover manager received the proof but has not verified
|
||||
// the proof yet. We don't roll up the proof until it's verified.
|
||||
return
|
||||
case types.ProvingTaskVerified:
|
||||
log.Info("Start to roll up zk proof", "hash", hash)
|
||||
r.metrics.bridgeL2RelayerProcessCommittedBatchesFinalizedTotal.Inc()
|
||||
|
||||
// Check batch status before send `finalizeBatchWithProof` tx.
|
||||
//batchStatus, err := r.getBatchStatusByIndex(batch.Index)
|
||||
//if err != nil {
|
||||
// r.metrics.bridgeL2ChainMonitorLatestFailedCall.Inc()
|
||||
// log.Warn("failed to get batch status, please check chain_monitor api server", "batch_index", batch.Index, "err", err)
|
||||
// return
|
||||
//}
|
||||
//if !batchStatus {
|
||||
// r.metrics.bridgeL2ChainMonitorLatestFailedBatchStatus.Inc()
|
||||
// log.Error("the batch status is not right, stop finalize batch and check the reason", "batch_index", batch.Index)
|
||||
// return
|
||||
//}
|
||||
|
||||
var parentBatchStateRoot string
|
||||
if batch.Index > 0 {
|
||||
@@ -463,7 +488,7 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
|
||||
|
||||
txID := hash + "-finalize"
|
||||
// add suffix `-finalize` to avoid duplication with commit tx in unit tests
|
||||
txHash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), data, 0)
|
||||
txHash, err := r.finalizeSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), data, 0)
|
||||
finalizeTxHash := &txHash
|
||||
if err != nil {
|
||||
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
|
||||
@@ -472,6 +497,13 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
|
||||
// the client does not see the 1st tx's updates at this point.
|
||||
// TODO: add more fine-grained error handling
|
||||
log.Error(
|
||||
"finalizeBatchWithProof in layer1 failed",
|
||||
"index", batch.Index,
|
||||
"hash", batch.Hash,
|
||||
"RollupContractAddress", r.cfg.RollupContractAddress,
|
||||
"err", err,
|
||||
)
|
||||
log.Debug(
|
||||
"finalizeBatchWithProof in layer1 failed",
|
||||
"index", batch.Index,
|
||||
"hash", batch.Hash,
|
||||
@@ -482,7 +514,6 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
|
||||
}
|
||||
return
|
||||
}
|
||||
bridgeL2BatchesFinalizedTotalCounter.Inc(1)
|
||||
log.Info("finalizeBatchWithProof in layer1", "index", batch.Index, "batch hash", batch.Hash, "tx hash", hash)
|
||||
|
||||
// record and sync with db, @todo handle db error
|
||||
@@ -493,6 +524,7 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
|
||||
"tx hash", finalizeTxHash.String(), "err", err)
|
||||
}
|
||||
r.processingFinalization.Store(txID, hash)
|
||||
r.metrics.bridgeL2RelayerProcessCommittedBatchesFinalizedSuccessTotal.Inc()
|
||||
|
||||
case types.ProvingTaskFailed:
|
||||
// We were unable to prove this batch. There are two possibilities:
|
||||
@@ -518,6 +550,29 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
|
||||
}
|
||||
}
|
||||
|
||||
// batchStatusResponse the response schema
|
||||
type batchStatusResponse struct {
|
||||
ErrCode int `json:"errcode"`
|
||||
ErrMsg string `json:"errmsg"`
|
||||
Data bool `json:"data"`
|
||||
}
|
||||
|
||||
func (r *Layer2Relayer) getBatchStatusByIndex(batchIndex uint64) (bool, error) {
|
||||
var response batchStatusResponse
|
||||
resp, err := r.chainMonitorClient.R().SetResult(&response).Get(fmt.Sprintf("%s/v1/batch_status?batch_index=%d", r.cfg.ChainMonitor.BaseURL, batchIndex))
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
if resp.IsError() {
|
||||
return false, resp.Error().(error)
|
||||
}
|
||||
if response.ErrCode != 0 {
|
||||
return false, fmt.Errorf("failed to get batch status, errCode: %d, errMsg: %s", response.ErrCode, response.ErrMsg)
|
||||
}
|
||||
|
||||
return response.Data, nil
|
||||
}
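For reference, the same chain_monitor call can be made standalone. The endpoint path and response schema below are taken from this diff; the resty v2 import path and the base URL are assumptions, not part of the PR.

package example

import (
	"fmt"

	"github.com/go-resty/resty/v2"
)

// batchStatus mirrors the chain_monitor response schema used above.
type batchStatus struct {
	ErrCode int    `json:"errcode"`
	ErrMsg  string `json:"errmsg"`
	Data    bool   `json:"data"`
}

// queryBatchStatus asks chain_monitor whether a batch index is safe to finalize.
func queryBatchStatus(baseURL string, batchIndex uint64) (bool, error) {
	var result batchStatus
	resp, err := resty.New().R().
		SetResult(&result).
		Get(fmt.Sprintf("%s/v1/batch_status?batch_index=%d", baseURL, batchIndex))
	if err != nil {
		return false, err
	}
	if resp.IsError() {
		return false, fmt.Errorf("chain_monitor returned HTTP %d", resp.StatusCode())
	}
	if result.ErrCode != 0 {
		return false, fmt.Errorf("chain_monitor error %d: %s", result.ErrCode, result.ErrMsg)
	}
	return result.Data, nil
}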
|
||||
|
||||
func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
|
||||
transactionType := "Unknown"
|
||||
// check whether it is CommitBatches transaction
|
||||
@@ -537,7 +592,7 @@ func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
|
||||
"batch hash", batchHash.(string),
|
||||
"tx hash", confirmation.TxHash.String(), "err", err)
|
||||
}
|
||||
bridgeL2BatchesCommittedConfirmedTotalCounter.Inc(1)
|
||||
r.metrics.bridgeL2BatchesCommittedConfirmedTotal.Inc()
|
||||
r.processingCommitment.Delete(confirmation.ID)
|
||||
}
|
||||
|
||||
@@ -559,7 +614,7 @@ func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
|
||||
"batch hash", batchHash.(string),
|
||||
"tx hash", confirmation.TxHash.String(), "err", err)
|
||||
}
|
||||
bridgeL2BatchesFinalizedConfirmedTotalCounter.Inc(1)
|
||||
r.metrics.bridgeL2BatchesFinalizedConfirmedTotal.Inc()
|
||||
r.processingFinalization.Delete(confirmation.ID)
|
||||
}
|
||||
log.Info("transaction confirmed in layer1", "type", transactionType, "confirmation", confirmation)
|
||||
@@ -570,11 +625,12 @@ func (r *Layer2Relayer) handleConfirmLoop(ctx context.Context) {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case confirmation := <-r.messageSender.ConfirmChan():
|
||||
case confirmation := <-r.commitSender.ConfirmChan():
|
||||
r.handleConfirmation(confirmation)
|
||||
case confirmation := <-r.rollupSender.ConfirmChan():
|
||||
case confirmation := <-r.finalizeSender.ConfirmChan():
|
||||
r.handleConfirmation(confirmation)
|
||||
case cfm := <-r.gasOracleSender.ConfirmChan():
|
||||
r.metrics.bridgeL2BatchesGasOraclerConfirmedTotal.Inc()
|
||||
if !cfm.IsSuccessful {
|
||||
// @discuss: maybe make it pending again?
|
||||
err := r.batchOrm.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())
|
||||
|
||||
bridge/internal/controller/relayer/l2_relayer_metrics.go (new file, 84 additions)
@@ -0,0 +1,84 @@
|
||||
package relayer
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
)
|
||||
|
||||
type l2RelayerMetrics struct {
|
||||
bridgeL2RelayerProcessPendingBatchTotal prometheus.Counter
|
||||
bridgeL2RelayerProcessPendingBatchSuccessTotal prometheus.Counter
|
||||
bridgeL2RelayerGasPriceOraclerRunTotal prometheus.Counter
|
||||
bridgeL2RelayerLastGasPrice prometheus.Gauge
|
||||
bridgeL2RelayerProcessCommittedBatchesTotal prometheus.Counter
|
||||
bridgeL2RelayerProcessCommittedBatchesFinalizedTotal prometheus.Counter
|
||||
bridgeL2RelayerProcessCommittedBatchesFinalizedSuccessTotal prometheus.Counter
|
||||
bridgeL2BatchesCommittedConfirmedTotal prometheus.Counter
|
||||
bridgeL2BatchesFinalizedConfirmedTotal prometheus.Counter
|
||||
bridgeL2BatchesGasOraclerConfirmedTotal prometheus.Counter
|
||||
bridgeL2ChainMonitorLatestFailedCall prometheus.Counter
|
||||
bridgeL2ChainMonitorLatestFailedBatchStatus prometheus.Counter
|
||||
}
|
||||
|
||||
var (
|
||||
initL2RelayerMetricOnce sync.Once
|
||||
l2RelayerMetric *l2RelayerMetrics
|
||||
)
|
||||
|
||||
func initL2RelayerMetrics(reg prometheus.Registerer) *l2RelayerMetrics {
|
||||
initL2RelayerMetricOnce.Do(func() {
|
||||
l2RelayerMetric = &l2RelayerMetrics{
|
||||
bridgeL2RelayerProcessPendingBatchTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_layer2_process_pending_batch_total",
|
||||
Help: "The total number of layer2 process pending batch",
|
||||
}),
|
||||
bridgeL2RelayerProcessPendingBatchSuccessTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_layer2_process_pending_batch_success_total",
|
||||
Help: "The total number of layer2 process pending success batch",
|
||||
}),
|
||||
bridgeL2RelayerGasPriceOraclerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_layer2_gas_price_oracler_total",
|
||||
Help: "The total number of layer2 gas price oracler run total",
|
||||
}),
|
||||
bridgeL2RelayerLastGasPrice: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "bridge_layer2_gas_price_latest_gas_price",
|
||||
Help: "The latest gas price of bridge relayer l2",
|
||||
}),
|
||||
bridgeL2RelayerProcessCommittedBatchesTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_layer2_process_committed_batches_total",
|
||||
Help: "The total number of layer2 process committed batches run total",
|
||||
}),
|
||||
bridgeL2RelayerProcessCommittedBatchesFinalizedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_layer2_process_committed_batches_finalized_total",
|
||||
Help: "The total number of layer2 process committed batches finalized total",
|
||||
}),
|
||||
bridgeL2RelayerProcessCommittedBatchesFinalizedSuccessTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_layer2_process_committed_batches_finalized_success_total",
|
||||
Help: "The total number of layer2 process committed batches finalized success total",
|
||||
}),
|
||||
bridgeL2BatchesCommittedConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_layer2_process_committed_batches_confirmed_total",
|
||||
Help: "The total number of layer2 process committed batches confirmed total",
|
||||
}),
|
||||
bridgeL2BatchesFinalizedConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_layer2_process_finalized_batches_confirmed_total",
|
||||
Help: "The total number of layer2 process finalized batches confirmed total",
|
||||
}),
|
||||
bridgeL2BatchesGasOraclerConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_layer2_process_gras_oracler_confirmed_total",
|
||||
Help: "The total number of layer2 process finalized batches confirmed total",
|
||||
}),
|
||||
bridgeL2ChainMonitorLatestFailedCall: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_layer2_chain_monitor_latest_failed_batch_call",
|
||||
Help: "The total number of failed call chain_monitor api",
|
||||
}),
|
||||
bridgeL2ChainMonitorLatestFailedBatchStatus: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_layer2_chain_monitor_latest_failed_batch_status",
|
||||
Help: "The total number of failed batch status get from chain_monitor",
|
||||
}),
|
||||
}
|
||||
})
|
||||
return l2RelayerMetric
|
||||
}
|
||||
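These counters only become visible if the Registerer handed to initL2RelayerMetrics is actually served somewhere. A minimal sketch using the standard client_golang promhttp handler follows; this is an assumption about deployment, not part of this PR.

package example

import (
	"net/http"

	"github.com/prometheus/client_golang/prometheus"
	"github.com/prometheus/client_golang/prometheus/promhttp"
)

// serveMetrics exposes the registry passed into the relayer/sender constructors.
func serveMetrics(reg *prometheus.Registry, addr string) error {
	mux := http.NewServeMux()
	mux.Handle("/metrics", promhttp.HandlerFor(reg, promhttp.HandlerOpts{}))
	return http.ListenAndServe(addr, mux)
}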
@@ -4,9 +4,12 @@ import (
|
||||
"context"
|
||||
"errors"
|
||||
"math/big"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/agiledragon/gomonkey/v2"
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/smartystreets/goconvey/convey"
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -35,7 +38,7 @@ func setupL2RelayerDB(t *testing.T) *gorm.DB {
|
||||
func testCreateNewRelayer(t *testing.T) {
|
||||
db := setupL2RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false)
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false, nil)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, relayer)
|
||||
}
|
||||
@@ -45,7 +48,7 @@ func testL2RelayerProcessPendingBatches(t *testing.T) {
|
||||
defer database.CloseDB(db)
|
||||
|
||||
l2Cfg := cfg.L2Config
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false)
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
l2BlockOrm := orm.NewL2Block(db)
|
||||
@@ -56,8 +59,14 @@ func testL2RelayerProcessPendingBatches(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
dbChunk2, err := chunkOrm.InsertChunk(context.Background(), chunk2)
|
||||
assert.NoError(t, err)
|
||||
batchMeta := &types.BatchMeta{
|
||||
StartChunkIndex: 0,
|
||||
StartChunkHash: dbChunk1.Hash,
|
||||
EndChunkIndex: 1,
|
||||
EndChunkHash: dbChunk2.Hash,
|
||||
}
|
||||
batchOrm := orm.NewBatch(db)
|
||||
batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, dbChunk1.Hash, dbChunk2.Hash, []*types.Chunk{chunk1, chunk2})
|
||||
batch, err := batchOrm.InsertBatch(context.Background(), []*types.Chunk{chunk1, chunk2}, batchMeta)
|
||||
assert.NoError(t, err)
|
||||
|
||||
relayer.ProcessPendingBatches()
|
||||
@@ -73,10 +82,16 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
|
||||
defer database.CloseDB(db)
|
||||
|
||||
l2Cfg := cfg.L2Config
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false)
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false, nil)
|
||||
assert.NoError(t, err)
|
||||
batchMeta := &types.BatchMeta{
|
||||
StartChunkIndex: 0,
|
||||
StartChunkHash: chunkHash1.Hex(),
|
||||
EndChunkIndex: 1,
|
||||
EndChunkHash: chunkHash2.Hex(),
|
||||
}
|
||||
batchOrm := orm.NewBatch(db)
|
||||
batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*types.Chunk{chunk1, chunk2})
|
||||
batch, err := batchOrm.InsertBatch(context.Background(), []*types.Chunk{chunk1, chunk2}, batchMeta)
|
||||
assert.NoError(t, err)
|
||||
|
||||
err = batchOrm.UpdateRollupStatus(context.Background(), batch.Hash, types.RollupCommitted)
|
||||
@@ -106,7 +121,7 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
|
||||
assert.Equal(t, types.RollupFinalizing, statuses[0])
|
||||
}
|
||||
|
||||
func testL2RelayerRollupConfirm(t *testing.T) {
|
||||
func testL2RelayerCommitConfirm(t *testing.T) {
|
||||
db := setupL2RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
@@ -114,44 +129,95 @@ func testL2RelayerRollupConfirm(t *testing.T) {
|
||||
l2Cfg := cfg.L2Config
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false)
|
||||
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Simulate message confirmations.
|
||||
processingKeys := []string{"committed-1", "committed-2", "finalized-1", "finalized-2"}
|
||||
isSuccessful := []bool{true, false, true, false}
|
||||
processingKeys := []string{"committed-1", "committed-2"}
|
||||
isSuccessful := []bool{true, false}
|
||||
|
||||
batchOrm := orm.NewBatch(db)
|
||||
batchHashes := make([]string, len(processingKeys))
|
||||
for i := range batchHashes {
|
||||
batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*types.Chunk{chunk1, chunk2})
|
||||
batchMeta := &types.BatchMeta{
|
||||
StartChunkIndex: 0,
|
||||
StartChunkHash: chunkHash1.Hex(),
|
||||
EndChunkIndex: 1,
|
||||
EndChunkHash: chunkHash2.Hex(),
|
||||
}
|
||||
batch, err := batchOrm.InsertBatch(context.Background(), []*types.Chunk{chunk1, chunk2}, batchMeta)
|
||||
assert.NoError(t, err)
|
||||
batchHashes[i] = batch.Hash
|
||||
}
|
||||
|
||||
for i, key := range processingKeys[:2] {
|
||||
for i, key := range processingKeys {
|
||||
l2Relayer.processingCommitment.Store(key, batchHashes[i])
|
||||
l2Relayer.rollupSender.SendConfirmation(&sender.Confirmation{
|
||||
l2Relayer.commitSender.SendConfirmation(&sender.Confirmation{
|
||||
ID: key,
|
||||
IsSuccessful: isSuccessful[i],
|
||||
TxHash: common.HexToHash("0x123456789abcdef"),
|
||||
})
|
||||
}
|
||||
|
||||
for i, key := range processingKeys[2:] {
|
||||
l2Relayer.processingFinalization.Store(key, batchHashes[i+2])
|
||||
l2Relayer.rollupSender.SendConfirmation(&sender.Confirmation{
|
||||
ID: key,
|
||||
IsSuccessful: isSuccessful[i+2],
|
||||
TxHash: common.HexToHash("0x123456789abcdef"),
|
||||
})
|
||||
}
|
||||
|
||||
// Check the database for the updated status using TryTimes.
|
||||
ok := utils.TryTimes(5, func() bool {
|
||||
expectedStatuses := []types.RollupStatus{
|
||||
types.RollupCommitted,
|
||||
types.RollupCommitFailed,
|
||||
}
|
||||
|
||||
for i, batchHash := range batchHashes {
|
||||
batchInDB, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{"hash": batchHash}, nil, 0)
|
||||
if err != nil || len(batchInDB) != 1 || types.RollupStatus(batchInDB[0].RollupStatus) != expectedStatuses[i] {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
})
|
||||
assert.True(t, ok)
|
||||
}
|
||||
|
||||
func testL2RelayerFinalizeConfirm(t *testing.T) {
|
||||
db := setupL2RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
// Create and set up the Layer2 Relayer.
|
||||
l2Cfg := cfg.L2Config
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Simulate message confirmations.
|
||||
processingKeys := []string{"finalized-1", "finalized-2"}
|
||||
isSuccessful := []bool{true, false}
|
||||
|
||||
batchOrm := orm.NewBatch(db)
|
||||
batchHashes := make([]string, len(processingKeys))
|
||||
for i := range batchHashes {
|
||||
batchMeta := &types.BatchMeta{
|
||||
StartChunkIndex: 0,
|
||||
StartChunkHash: chunkHash1.Hex(),
|
||||
EndChunkIndex: 1,
|
||||
EndChunkHash: chunkHash2.Hex(),
|
||||
}
|
||||
batch, err := batchOrm.InsertBatch(context.Background(), []*types.Chunk{chunk1, chunk2}, batchMeta)
|
||||
assert.NoError(t, err)
|
||||
batchHashes[i] = batch.Hash
|
||||
}
|
||||
|
||||
for i, key := range processingKeys {
|
||||
l2Relayer.processingFinalization.Store(key, batchHashes[i])
|
||||
l2Relayer.finalizeSender.SendConfirmation(&sender.Confirmation{
|
||||
ID: key,
|
||||
IsSuccessful: isSuccessful[i],
|
||||
TxHash: common.HexToHash("0x123456789abcdef"),
|
||||
})
|
||||
}
|
||||
|
||||
// Check the database for the updated status using TryTimes.
|
||||
ok := utils.TryTimes(5, func() bool {
|
||||
expectedStatuses := []types.RollupStatus{
|
||||
types.RollupFinalized,
|
||||
types.RollupFinalizeFailed,
|
||||
}
|
||||
@@ -171,18 +237,30 @@ func testL2RelayerGasOracleConfirm(t *testing.T) {
|
||||
db := setupL2RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
batchMeta1 := &types.BatchMeta{
|
||||
StartChunkIndex: 0,
|
||||
StartChunkHash: chunkHash1.Hex(),
|
||||
EndChunkIndex: 0,
|
||||
EndChunkHash: chunkHash1.Hex(),
|
||||
}
|
||||
batchOrm := orm.NewBatch(db)
|
||||
batch1, err := batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash1.Hex(), chunkHash1.Hex(), []*types.Chunk{chunk1})
|
||||
batch1, err := batchOrm.InsertBatch(context.Background(), []*types.Chunk{chunk1}, batchMeta1)
|
||||
assert.NoError(t, err)
|
||||
|
||||
batch2, err := batchOrm.InsertBatch(context.Background(), 1, 1, chunkHash2.Hex(), chunkHash2.Hex(), []*types.Chunk{chunk2})
|
||||
batchMeta2 := &types.BatchMeta{
|
||||
StartChunkIndex: 1,
|
||||
StartChunkHash: chunkHash2.Hex(),
|
||||
EndChunkIndex: 1,
|
||||
EndChunkHash: chunkHash2.Hex(),
|
||||
}
|
||||
batch2, err := batchOrm.InsertBatch(context.Background(), []*types.Chunk{chunk2}, batchMeta2)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Create and set up the Layer2 Relayer.
|
||||
l2Cfg := cfg.L2Config
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
defer cancel()
|
||||
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false)
|
||||
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Simulate message confirmations.
|
||||
@@ -220,7 +298,7 @@ func testLayer2RelayerProcessGasPriceOracle(t *testing.T) {
|
||||
db := setupL2RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false)
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false, nil)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, relayer)
|
||||
|
||||
@@ -292,3 +370,33 @@ func testLayer2RelayerProcessGasPriceOracle(t *testing.T) {
|
||||
})
|
||||
relayer.ProcessGasPriceOracle()
|
||||
}
|
||||
|
||||
func mockChainMonitorServer(baseURL string) (*http.Server, error) {
|
||||
router := gin.New()
|
||||
r := router.Group("/v1")
|
||||
r.GET("/batch_status", func(ctx *gin.Context) {
|
||||
ctx.JSON(http.StatusOK, struct {
|
||||
ErrCode int `json:"errcode"`
|
||||
ErrMsg string `json:"errmsg"`
|
||||
Data bool `json:"data"`
|
||||
}{
|
||||
ErrCode: 0,
|
||||
ErrMsg: "",
|
||||
Data: true,
|
||||
})
|
||||
})
|
||||
return utils.StartHTTPServer(strings.Split(baseURL, "//")[1], router)
|
||||
}
|
||||
|
||||
func testGetBatchStatusByIndex(t *testing.T) {
|
||||
db := setupL2RelayerDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false, nil)
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, relayer)
|
||||
|
||||
status, err := relayer.getBatchStatusByIndex(1)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, true, status)
|
||||
}
|
||||
|
||||
@@ -86,10 +86,12 @@ func TestMain(m *testing.M) {
|
||||
|
||||
func TestFunctions(t *testing.T) {
|
||||
setupEnv(t)
|
||||
srv, err := mockChainMonitorServer(cfg.L2Config.RelayerConfig.ChainMonitor.BaseURL)
|
||||
assert.NoError(t, err)
|
||||
defer srv.Close()
|
||||
|
||||
// Run l1 relayer test cases.
|
||||
t.Run("TestCreateNewL1Relayer", testCreateNewL1Relayer)
|
||||
t.Run("TestL1RelayerProcessSaveEvents", testL1RelayerProcessSaveEvents)
|
||||
t.Run("TestL1RelayerMsgConfirm", testL1RelayerMsgConfirm)
|
||||
t.Run("TestL1RelayerGasOracleConfirm", testL1RelayerGasOracleConfirm)
|
||||
t.Run("TestL1RelayerProcessGasPriceOracle", testL1RelayerProcessGasPriceOracle)
|
||||
|
||||
@@ -97,7 +99,10 @@ func TestFunctions(t *testing.T) {
|
||||
t.Run("TestCreateNewRelayer", testCreateNewRelayer)
|
||||
t.Run("TestL2RelayerProcessPendingBatches", testL2RelayerProcessPendingBatches)
|
||||
t.Run("TestL2RelayerProcessCommittedBatches", testL2RelayerProcessCommittedBatches)
|
||||
t.Run("TestL2RelayerRollupConfirm", testL2RelayerRollupConfirm)
|
||||
t.Run("TestL2RelayerCommitConfirm", testL2RelayerCommitConfirm)
|
||||
t.Run("TestL2RelayerFinalizeConfirm", testL2RelayerFinalizeConfirm)
|
||||
t.Run("TestL2RelayerGasOracleConfirm", testL2RelayerGasOracleConfirm)
|
||||
t.Run("TestLayer2RelayerProcessGasPriceOracle", testLayer2RelayerProcessGasPriceOracle)
|
||||
// test getBatchStatusByIndex
|
||||
t.Run("TestGetBatchStatusByIndex", testGetBatchStatusByIndex)
|
||||
}
|
||||
|
||||
@@ -1,165 +0,0 @@
|
||||
package sender
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/ecdsa"
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
)
|
||||
|
||||
type accountPool struct {
|
||||
client *ethclient.Client
|
||||
|
||||
minBalance *big.Int
|
||||
accounts map[common.Address]*bind.TransactOpts
|
||||
accsCh chan *bind.TransactOpts
|
||||
}
|
||||
|
||||
// newAccounts creates an accountPool instance.
|
||||
func newAccountPool(ctx context.Context, minBalance *big.Int, client *ethclient.Client, privs []*ecdsa.PrivateKey) (*accountPool, error) {
|
||||
accs := &accountPool{
|
||||
client: client,
|
||||
minBalance: minBalance,
|
||||
accounts: make(map[common.Address]*bind.TransactOpts, len(privs)),
|
||||
accsCh: make(chan *bind.TransactOpts, len(privs)+2),
|
||||
}
|
||||
|
||||
// get chainID from client
|
||||
chainID, err := client.ChainID(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, privStr := range privs {
|
||||
auth, err := bind.NewKeyedTransactorWithChainID(privStr, chainID)
|
||||
if err != nil {
|
||||
log.Error("failed to create account", "chainID", chainID.String(), "err", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// Set pending nonce
|
||||
nonce, err := client.PendingNonceAt(ctx, auth.From)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
auth.Nonce = big.NewInt(int64(nonce))
|
||||
accs.accounts[auth.From] = auth
|
||||
accs.accsCh <- auth
|
||||
}
|
||||
|
||||
return accs, accs.checkAndSetBalances(ctx)
|
||||
}
|
||||
|
||||
// getAccount get auth from channel.
|
||||
func (a *accountPool) getAccount() *bind.TransactOpts {
|
||||
select {
|
||||
case auth := <-a.accsCh:
|
||||
return auth
|
||||
default:
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
// releaseAccount set used auth into channel.
|
||||
func (a *accountPool) releaseAccount(auth *bind.TransactOpts) {
|
||||
a.accsCh <- auth
|
||||
}
|
||||
|
||||
// reSetNonce reset nonce if send signed tx failed.
|
||||
func (a *accountPool) resetNonce(ctx context.Context, auth *bind.TransactOpts) {
|
||||
nonce, err := a.client.PendingNonceAt(ctx, auth.From)
|
||||
if err != nil {
|
||||
log.Warn("failed to reset nonce", "address", auth.From.String(), "err", err)
|
||||
return
|
||||
}
|
||||
auth.Nonce = big.NewInt(int64(nonce))
|
||||
}
|
||||
|
||||
// checkAndSetBalance check balance and set min balance.
|
||||
func (a *accountPool) checkAndSetBalances(ctx context.Context) error {
|
||||
var (
|
||||
root *bind.TransactOpts
|
||||
maxBls = big.NewInt(0)
|
||||
lostAuths []*bind.TransactOpts
|
||||
)
|
||||
|
||||
for addr, auth := range a.accounts {
|
||||
bls, err := a.client.BalanceAt(ctx, addr, nil)
|
||||
if err != nil || bls.Cmp(a.minBalance) < 0 {
|
||||
if err != nil {
|
||||
log.Warn("failed to get balance", "address", addr.String(), "err", err)
|
||||
return err
|
||||
}
|
||||
lostAuths = append(lostAuths, auth)
|
||||
continue
|
||||
} else if bls.Cmp(maxBls) > 0 { // Find the biggest balance account.
|
||||
root, maxBls = auth, bls
|
||||
}
|
||||
}
|
||||
if root == nil {
|
||||
return fmt.Errorf("no account has enough balance")
|
||||
}
|
||||
if len(lostAuths) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
var (
|
||||
tx *types.Transaction
|
||||
err error
|
||||
)
|
||||
for _, auth := range lostAuths {
|
||||
tx, err = a.createSignedTx(root, &auth.From, a.minBalance)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = a.client.SendTransaction(ctx, tx)
|
||||
if err != nil {
|
||||
log.Error("Failed to send balance to account", "err", err)
|
||||
return err
|
||||
}
|
||||
log.Debug("send balance to account", "account", auth.From.String(), "balance", a.minBalance.String())
|
||||
}
|
||||
// wait util mined
|
||||
if _, err = bind.WaitMined(ctx, a.client, tx); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// Reset root's nonce.
|
||||
a.resetNonce(ctx, root)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (a *accountPool) createSignedTx(from *bind.TransactOpts, to *common.Address, value *big.Int) (*types.Transaction, error) {
|
||||
gasPrice, err := a.client.SuggestGasPrice(context.Background())
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
gasPrice.Mul(gasPrice, big.NewInt(2))
|
||||
|
||||
// Get pending nonce
|
||||
nonce, err := a.client.PendingNonceAt(context.Background(), from.From)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
tx := types.NewTx(&types.LegacyTx{
|
||||
Nonce: nonce,
|
||||
To: to,
|
||||
Value: value,
|
||||
Gas: 500000,
|
||||
GasPrice: gasPrice,
|
||||
})
|
||||
signedTx, err := from.Signer(from.From, tx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return signedTx, nil
|
||||
}
|
||||
@@ -7,15 +7,18 @@ import (
|
||||
"github.com/scroll-tech/go-ethereum"
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
)
|
||||
|
||||
func (s *Sender) estimateLegacyGas(auth *bind.TransactOpts, contract *common.Address, value *big.Int, input []byte, minGasLimit uint64) (*FeeData, error) {
|
||||
gasPrice, err := s.client.SuggestGasPrice(s.ctx)
|
||||
if err != nil {
|
||||
log.Error("estimateLegacyGas SuggestGasPrice failure", "error", err)
|
||||
return nil, err
|
||||
}
|
||||
gasLimit, err := s.estimateGasLimit(auth, contract, input, gasPrice, nil, nil, value, minGasLimit)
|
||||
if err != nil {
|
||||
log.Error("estimateLegacyGas estimateGasLimit failure", "gasPrice", gasPrice, "error", err)
|
||||
return nil, err
|
||||
}
|
||||
return &FeeData{
|
||||
@@ -27,6 +30,7 @@ func (s *Sender) estimateLegacyGas(auth *bind.TransactOpts, contract *common.Add
|
||||
func (s *Sender) estimateDynamicGas(auth *bind.TransactOpts, contract *common.Address, value *big.Int, input []byte, minGasLimit uint64) (*FeeData, error) {
|
||||
gasTipCap, err := s.client.SuggestGasTipCap(s.ctx)
|
||||
if err != nil {
|
||||
log.Error("estimateDynamicGas SuggestGasTipCap failure", "error", err)
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@@ -40,7 +44,11 @@ func (s *Sender) estimateDynamicGas(auth *bind.TransactOpts, contract *common.Ad
|
||||
)
|
||||
gasLimit, err := s.estimateGasLimit(auth, contract, input, nil, gasTipCap, gasFeeCap, value, minGasLimit)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
log.Error("estimateDynamicGas estimateGasLimit failure", "error", err)
|
||||
if minGasLimit == 0 {
|
||||
return nil, err
|
||||
}
|
||||
gasLimit = minGasLimit
|
||||
}
|
||||
return &FeeData{
|
||||
gasLimit: gasLimit,
|
||||
@@ -61,13 +69,14 @@ func (s *Sender) estimateGasLimit(opts *bind.TransactOpts, contract *common.Addr
|
||||
}
|
||||
gasLimit, err := s.client.EstimateGas(s.ctx, msg)
|
||||
if err != nil {
|
||||
log.Error("estimateGasLimit EstimateGas failure", "error", err)
|
||||
return 0, err
|
||||
}
|
||||
if minGasLimit > gasLimit {
|
||||
gasLimit = minGasLimit
|
||||
}
|
||||
|
||||
gasLimit = gasLimit * 15 / 10 // 50% extra gas to void out of gas error
|
||||
gasLimit = gasLimit * 15 / 10 // 50% extra gas to avoid out of gas error
|
||||
|
||||
return gasLimit, nil
|
||||
}
|
||||
|
||||
bridge/internal/controller/sender/metrics.go (new file, 95 additions)
@@ -0,0 +1,95 @@
|
||||
package sender
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
)
|
||||
|
||||
type senderMetrics struct {
|
||||
senderCheckBalancerTotal *prometheus.CounterVec
|
||||
senderCheckPendingTransactionTotal *prometheus.CounterVec
|
||||
sendTransactionTotal *prometheus.CounterVec
|
||||
sendTransactionFailureFullTx *prometheus.GaugeVec
|
||||
sendTransactionFailureRepeatTransaction *prometheus.CounterVec
|
||||
sendTransactionFailureGetFee *prometheus.CounterVec
|
||||
sendTransactionFailureSendTx *prometheus.CounterVec
|
||||
resubmitTransactionTotal *prometheus.CounterVec
|
||||
currentPendingTxsNum *prometheus.GaugeVec
|
||||
currentGasFeeCap *prometheus.GaugeVec
|
||||
currentGasTipCap *prometheus.GaugeVec
|
||||
currentGasPrice *prometheus.GaugeVec
|
||||
currentGasLimit *prometheus.GaugeVec
|
||||
currentNonce *prometheus.GaugeVec
|
||||
}
|
||||
|
||||
var (
|
||||
initSenderMetricOnce sync.Once
|
||||
sm *senderMetrics
|
||||
)
|
||||
|
||||
func initSenderMetrics(reg prometheus.Registerer) *senderMetrics {
|
||||
initSenderMetricOnce.Do(func() {
|
||||
sm = &senderMetrics{
|
||||
sendTransactionTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "bridge_sender_send_transaction_total",
|
||||
Help: "The total number of sending transaction.",
|
||||
}, []string{"service", "name"}),
|
||||
sendTransactionFailureFullTx: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "bridge_sender_send_transaction_full_tx_failure_total",
|
||||
Help: "The total number of sending transaction failure for full size tx.",
|
||||
}, []string{"service", "name"}),
|
||||
sendTransactionFailureRepeatTransaction: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "bridge_sender_send_transaction_repeat_transaction_failure_total",
|
||||
Help: "The total number of sending transaction failure for repeat transaction.",
|
||||
}, []string{"service", "name"}),
|
||||
sendTransactionFailureGetFee: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "bridge_sender_send_transaction_get_fee_failure_total",
|
||||
Help: "The total number of sending transaction failure for getting fee.",
|
||||
}, []string{"service", "name"}),
|
||||
sendTransactionFailureSendTx: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "bridge_sender_send_transaction_send_tx_failure_total",
|
||||
Help: "The total number of sending transaction failure for sending tx.",
|
||||
}, []string{"service", "name"}),
|
||||
resubmitTransactionTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "bridge_sender_send_transaction_resubmit_send_transaction_total",
|
||||
Help: "The total number of resubmit transaction.",
|
||||
}, []string{"service", "name"}),
|
||||
currentPendingTxsNum: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "bridge_sender_pending_tx_count",
|
||||
Help: "The pending tx count in the sender.",
|
||||
}, []string{"service", "name"}),
|
||||
currentGasFeeCap: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "bridge_sender_gas_fee_cap",
|
||||
Help: "The gas fee of current transaction.",
|
||||
}, []string{"service", "name"}),
|
||||
currentGasTipCap: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "bridge_sender_gas_tip_cap",
|
||||
Help: "The gas tip of current transaction.",
|
||||
}, []string{"service", "name"}),
|
||||
currentGasPrice: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "bridge_sender_gas_price_cap",
|
||||
Help: "The gas price of current transaction.",
|
||||
}, []string{"service", "name"}),
|
||||
currentGasLimit: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "bridge_sender_gas_limit",
|
||||
Help: "The gas limit of current transaction.",
|
||||
}, []string{"service", "name"}),
|
||||
currentNonce: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
|
||||
Name: "bridge_sender_nonce",
|
||||
Help: "The nonce of current transaction.",
|
||||
}, []string{"service", "name"}),
|
||||
senderCheckPendingTransactionTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "bridge_sender_check_pending_transaction_total",
|
||||
Help: "The total number of check pending transaction.",
|
||||
}, []string{"service", "name"}),
|
||||
senderCheckBalancerTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
|
||||
Name: "bridge_sender_check_balancer_total",
|
||||
Help: "The total number of check balancer.",
|
||||
}, []string{"service", "name"}),
|
||||
}
|
||||
})
|
||||
|
||||
return sm
|
||||
}
|
||||
@@ -11,6 +11,7 @@ import (
|
||||
"time"
|
||||
|
||||
cmapV2 "github.com/orcaman/concurrent-map/v2"
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
@@ -39,10 +40,6 @@ var (
|
||||
ErrFullPending = errors.New("sender's pending pool is full")
|
||||
)
|
||||
|
||||
var (
|
||||
defaultPendingLimit = 10
|
||||
)
|
||||
|
||||
// Confirmation struct used to indicate transaction confirmation details
|
||||
type Confirmation struct {
|
||||
ID string
|
||||
@@ -74,9 +71,11 @@ type Sender struct {
|
||||
client *ethclient.Client // The client to retrieve on chain data or send transaction.
|
||||
chainID *big.Int // The chain id of the endpoint
|
||||
ctx context.Context
|
||||
service string
|
||||
name string
|
||||
|
||||
// account fields.
|
||||
auths *accountPool
|
||||
auth *bind.TransactOpts
|
||||
minBalance *big.Int
|
||||
|
||||
blockNumber uint64 // Current block number on chain.
|
||||
baseFeePerGas uint64 // Current base fee per gas on chain
|
||||
@@ -84,31 +83,40 @@ type Sender struct {
|
||||
confirmCh chan *Confirmation
|
||||
|
||||
stopCh chan struct{}
|
||||
|
||||
metrics *senderMetrics
|
||||
}
|
||||
|
||||
// NewSender returns a new instance of transaction sender
|
||||
// txConfirmationCh is used to notify confirmed transaction
|
||||
func NewSender(ctx context.Context, config *config.SenderConfig, privs []*ecdsa.PrivateKey) (*Sender, error) {
|
||||
func NewSender(ctx context.Context, config *config.SenderConfig, priv *ecdsa.PrivateKey, service, name string, reg prometheus.Registerer) (*Sender, error) {
|
||||
client, err := ethclient.Dial(config.Endpoint)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("failed to dial eth client, err: %w", err)
|
||||
}
|
||||
|
||||
// get chainID from client
|
||||
chainID, err := client.ChainID(ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("failed to get chain ID, err: %w", err)
|
||||
}
|
||||
|
||||
auths, err := newAccountPool(ctx, config.MinBalance, client, privs)
|
||||
auth, err := bind.NewKeyedTransactorWithChainID(priv, chainID)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create account pool, err: %v", err)
|
||||
return nil, fmt.Errorf("failed to create transactor with chain ID %v, err: %w", chainID, err)
|
||||
}
|
||||
|
||||
// Set pending nonce
|
||||
nonce, err := client.PendingNonceAt(ctx, auth.From)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get pending nonce for address %s, err: %w", auth.From.Hex(), err)
|
||||
}
|
||||
auth.Nonce = big.NewInt(int64(nonce))
|
||||
|
||||
// get header by number
|
||||
header, err := client.HeaderByNumber(ctx, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("failed to get header by number, err: %w", err)
|
||||
}
|
||||
|
||||
var baseFeePerGas uint64
|
||||
@@ -116,27 +124,26 @@ func NewSender(ctx context.Context, config *config.SenderConfig, privs []*ecdsa.
|
||||
if header.BaseFee != nil {
|
||||
baseFeePerGas = header.BaseFee.Uint64()
|
||||
} else {
|
||||
return nil, errors.New("DynamicFeeTxType not supported, header.BaseFee nil")
|
||||
return nil, errors.New("dynamic fee tx type not supported: header.BaseFee is nil")
|
||||
}
|
||||
}
|
||||
|
||||
// initialize pending limit with a default value
|
||||
if config.PendingLimit == 0 {
|
||||
config.PendingLimit = defaultPendingLimit
|
||||
}
|
||||
|
||||
sender := &Sender{
|
||||
ctx: ctx,
|
||||
config: config,
|
||||
client: client,
|
||||
chainID: chainID,
|
||||
auths: auths,
|
||||
auth: auth,
|
||||
minBalance: config.MinBalance,
|
||||
confirmCh: make(chan *Confirmation, 128),
|
||||
blockNumber: header.Number.Uint64(),
|
||||
baseFeePerGas: baseFeePerGas,
|
||||
pendingTxs: cmapV2.New[*PendingTransaction](),
|
||||
stopCh: make(chan struct{}),
|
||||
name: name,
|
||||
service: service,
|
||||
}
|
||||
sender.metrics = initSenderMetrics(reg)
|
||||
|
||||
go sender.loop(ctx)
|
||||
|
||||
@@ -175,11 +182,6 @@ func (s *Sender) SendConfirmation(cfm *Confirmation) {
|
||||
s.confirmCh <- cfm
|
||||
}
|
||||
|
||||
// NumberOfAccounts return the count of accounts.
|
||||
func (s *Sender) NumberOfAccounts() int {
|
||||
return len(s.auths.accounts)
|
||||
}
|
||||
|
||||
func (s *Sender) getFeeData(auth *bind.TransactOpts, target *common.Address, value *big.Int, data []byte, minGasLimit uint64) (*FeeData, error) {
|
||||
if s.config.TxType == DynamicFeeTxType {
|
||||
return s.estimateDynamicGas(auth, target, value, data, minGasLimit)
|
||||
@@ -188,53 +190,55 @@ func (s *Sender) getFeeData(auth *bind.TransactOpts, target *common.Address, val
|
||||
}
|
||||
|
||||
// SendTransaction sends a signed L2-to-L1 transaction.
|
||||
func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.Int, data []byte, minGasLimit uint64) (common.Hash, error) {
|
||||
s.metrics.sendTransactionTotal.WithLabelValues(s.service, s.name).Inc()
|
||||
if s.IsFull() {
|
||||
s.metrics.sendTransactionFailureFullTx.WithLabelValues(s.service, s.name).Set(1)
|
||||
return common.Hash{}, ErrFullPending
|
||||
}
|
||||
// We occupy the ID, in case some other threads call with the same ID in the same time
|
||||
|
||||
s.metrics.sendTransactionFailureFullTx.WithLabelValues(s.service, s.name).Set(0)
|
||||
if ok := s.pendingTxs.SetIfAbsent(ID, nil); !ok {
|
||||
return common.Hash{}, fmt.Errorf("has the repeat tx ID, ID: %s", ID)
|
||||
}
|
||||
// get
|
||||
auth := s.auths.getAccount()
|
||||
if auth == nil {
|
||||
s.pendingTxs.Remove(ID) // release the ID on failure
|
||||
return common.Hash{}, ErrNoAvailableAccount
|
||||
s.metrics.sendTransactionFailureRepeatTransaction.WithLabelValues(s.service, s.name).Inc()
|
||||
return common.Hash{}, fmt.Errorf("repeat transaction ID: %s", ID)
|
||||
}
|
||||
|
||||
defer s.auths.releaseAccount(auth)
|
||||
var (
|
||||
feeData *FeeData
|
||||
tx *types.Transaction
|
||||
err error
|
||||
)
|
||||
|
||||
defer func() {
|
||||
if err != nil {
|
||||
s.pendingTxs.Remove(ID) // release the ID on failure
|
||||
}
|
||||
}()
|
||||
|
||||
var (
|
||||
feeData *FeeData
|
||||
tx *types.Transaction
|
||||
)
|
||||
// estimate gas fee
|
||||
if feeData, err = s.getFeeData(auth, target, value, data, minGasLimit); err != nil {
|
||||
return
|
||||
}
|
||||
if tx, err = s.createAndSendTx(auth, feeData, target, value, data, nil); err == nil {
|
||||
// add pending transaction to queue
|
||||
pending := &PendingTransaction{
|
||||
tx: tx,
|
||||
id: ID,
|
||||
signer: auth,
|
||||
submitAt: atomic.LoadUint64(&s.blockNumber),
|
||||
feeData: feeData,
|
||||
}
|
||||
s.pendingTxs.Set(ID, pending)
|
||||
return tx.Hash(), nil
|
||||
if feeData, err = s.getFeeData(s.auth, target, value, data, minGasLimit); err != nil {
|
||||
s.metrics.sendTransactionFailureGetFee.WithLabelValues(s.service, s.name).Inc()
|
||||
log.Error("failed to get fee data", "err", err)
|
||||
return common.Hash{}, fmt.Errorf("failed to get fee data, err: %w", err)
|
||||
}
|
||||
|
||||
return
|
||||
if tx, err = s.createAndSendTx(s.auth, feeData, target, value, data, nil); err != nil {
|
||||
s.metrics.sendTransactionFailureSendTx.WithLabelValues(s.service, s.name).Inc()
|
||||
return common.Hash{}, fmt.Errorf("failed to create and send transaction, err: %w", err)
|
||||
}
|
||||
|
||||
// add pending transaction
|
||||
pending := &PendingTransaction{
|
||||
tx: tx,
|
||||
id: ID,
|
||||
signer: s.auth,
|
||||
submitAt: atomic.LoadUint64(&s.blockNumber),
|
||||
feeData: feeData,
|
||||
}
|
||||
s.pendingTxs.Set(ID, pending)
|
||||
return tx.Hash(), nil
|
||||
}
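A hedged usage sketch of the single-key sender: the caller picks a unique ID, sends, and later observes the result on ConfirmChan. The sender import path, contract address, and calldata below are placeholders inferred from this diff, not values from the PR.

package example

import (
	"math/big"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/log"

	"scroll-tech/bridge/internal/controller/sender"
)

// commitBatch submits a commit transaction for one batch hash.
func commitBatch(s *sender.Sender, batchHash string, rollupAddr common.Address, calldata []byte, minGasLimit uint64) {
	// The ID doubles as the pending-pool key; reusing an in-flight ID is rejected.
	txHash, err := s.SendTransaction(batchHash+"-commit", &rollupAddr, big.NewInt(0), calldata, minGasLimit)
	if err != nil {
		log.Error("commitBatch failed", "err", err)
		return
	}
	log.Info("commitBatch sent", "tx", txHash.Hex())

	// Confirmations (success, or failure after fee escalation) arrive
	// asynchronously; the real relayer drains this channel in a dedicated loop.
	cfm := <-s.ConfirmChan()
	log.Info("confirmed", "id", cfm.ID, "success", cfm.IsSuccessful, "tx", cfm.TxHash.Hex())
}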
|
||||
|
||||
func (s *Sender) createAndSendTx(auth *bind.TransactOpts, feeData *FeeData, target *common.Address, value *big.Int, data []byte, overrideNonce *uint64) (tx *types.Transaction, err error) {
|
||||
func (s *Sender) createAndSendTx(auth *bind.TransactOpts, feeData *FeeData, target *common.Address, value *big.Int, data []byte, overrideNonce *uint64) (*types.Transaction, error) {
|
||||
var (
|
||||
nonce = auth.Nonce.Uint64()
|
||||
txData types.TxData
|
||||
@@ -292,26 +296,50 @@ func (s *Sender) createAndSendTx(auth *bind.TransactOpts, feeData *FeeData, targ
|
||||
}
|
||||
|
||||
// sign and send
|
||||
tx, err = auth.Signer(auth.From, types.NewTx(txData))
|
||||
tx, err := auth.Signer(auth.From, types.NewTx(txData))
|
||||
if err != nil {
|
||||
log.Error("failed to sign tx", "err", err)
|
||||
return
|
||||
return nil, err
|
||||
}
|
||||
if err = s.client.SendTransaction(s.ctx, tx); err != nil {
|
||||
log.Error("failed to send tx", "tx hash", tx.Hash().String(), "err", err)
|
||||
// Check if contain nonce, and reset nonce
|
||||
// only reset nonce when it is not from resubmit
|
||||
if strings.Contains(err.Error(), "nonce") && overrideNonce == nil {
|
||||
s.auths.resetNonce(context.Background(), auth)
|
||||
s.resetNonce(context.Background())
|
||||
}
|
||||
return
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if feeData.gasTipCap != nil {
|
||||
s.metrics.currentGasTipCap.WithLabelValues(s.service, s.name).Set(float64(feeData.gasTipCap.Uint64()))
|
||||
}
|
||||
|
||||
if feeData.gasFeeCap != nil {
|
||||
s.metrics.currentGasFeeCap.WithLabelValues(s.service, s.name).Set(float64(feeData.gasFeeCap.Uint64()))
|
||||
}
|
||||
|
||||
if feeData.gasPrice != nil {
|
||||
s.metrics.currentGasPrice.WithLabelValues(s.service, s.name).Set(float64(feeData.gasPrice.Uint64()))
|
||||
}
|
||||
|
||||
s.metrics.currentGasLimit.WithLabelValues(s.service, s.name).Set(float64(feeData.gasLimit))
|
||||
|
||||
// update nonce when it is not from resubmit
|
||||
if overrideNonce == nil {
|
||||
auth.Nonce = big.NewInt(int64(nonce + 1))
|
||||
}
|
||||
return
|
||||
return tx, nil
|
||||
}
|
||||
|
||||
// resetNonce resets the nonce if sending a signed tx failed.
|
||||
func (s *Sender) resetNonce(ctx context.Context) {
|
||||
nonce, err := s.client.PendingNonceAt(ctx, s.auth.From)
|
||||
if err != nil {
|
||||
log.Warn("failed to reset nonce", "address", s.auth.From.String(), "err", err)
|
||||
return
|
||||
}
|
||||
s.auth.Nonce = big.NewInt(int64(nonce))
|
||||
}
|
||||
|
||||
func (s *Sender) resubmitTransaction(feeData *FeeData, auth *bind.TransactOpts, tx *types.Transaction) (*types.Transaction, error) {
|
||||
@@ -319,8 +347,15 @@ func (s *Sender) resubmitTransaction(feeData *FeeData, auth *bind.TransactOpts,
|
||||
escalateMultipleDen := new(big.Int).SetUint64(s.config.EscalateMultipleDen)
|
||||
maxGasPrice := new(big.Int).SetUint64(s.config.MaxGasPrice)
|
||||
|
||||
txInfo := map[string]interface{}{
|
||||
"tx_hash": tx.Hash().String(),
|
||||
"tx_type": s.config.TxType,
|
||||
"from": auth.From.String(),
|
||||
}
|
||||
|
||||
switch s.config.TxType {
|
||||
case LegacyTxType, AccessListTxType: // `LegacyTxType`is for ganache mock node
|
||||
originalGasPrice := feeData.gasPrice
|
||||
gasPrice := escalateMultipleNum.Mul(escalateMultipleNum, big.NewInt(feeData.gasPrice.Int64()))
|
||||
gasPrice = gasPrice.Div(gasPrice, escalateMultipleDen)
|
||||
if gasPrice.Cmp(feeData.gasPrice) < 0 {
|
||||
@@ -330,7 +365,13 @@ func (s *Sender) resubmitTransaction(feeData *FeeData, auth *bind.TransactOpts,
|
||||
gasPrice = maxGasPrice
|
||||
}
|
||||
feeData.gasPrice = gasPrice
|
||||
|
||||
txInfo["original_gas_price"] = originalGasPrice
|
||||
txInfo["adjusted_gas_price"] = gasPrice
|
||||
default:
|
||||
originalGasTipCap := big.NewInt(feeData.gasTipCap.Int64())
|
||||
originalGasFeeCap := big.NewInt(feeData.gasFeeCap.Int64())
|
||||
|
||||
gasTipCap := big.NewInt(feeData.gasTipCap.Int64())
|
||||
gasTipCap = gasTipCap.Mul(gasTipCap, escalateMultipleNum)
|
||||
gasTipCap = gasTipCap.Div(gasTipCap, escalateMultipleDen)
|
||||
@@ -362,9 +403,17 @@ func (s *Sender) resubmitTransaction(feeData *FeeData, auth *bind.TransactOpts,
|
||||
}
|
||||
feeData.gasFeeCap = gasFeeCap
|
||||
feeData.gasTipCap = gasTipCap
|
||||
|
||||
txInfo["original_gas_tip_cap"] = originalGasTipCap
|
||||
txInfo["adjusted_gas_tip_cap"] = gasTipCap
|
||||
txInfo["original_gas_fee_cap"] = originalGasFeeCap
|
||||
txInfo["adjusted_gas_fee_cap"] = gasFeeCap
|
||||
}
|
||||
|
||||
log.Debug("Transaction gas adjustment details", txInfo)
|
||||
|
||||
nonce := tx.Nonce()
|
||||
s.metrics.resubmitTransactionTotal.WithLabelValues(s.service, s.name).Inc()
|
||||
return s.createAndSendTx(auth, feeData, tx.To(), tx.Value(), tx.Data(), &nonce)
|
||||
}
|
||||
|
||||
@@ -401,6 +450,12 @@ func (s *Sender) checkPendingTransaction(header *types.Header, confirmed uint64)
|
||||
}
|
||||
}
|
||||
} else if s.config.EscalateBlocks+pending.submitAt < number {
|
||||
log.Debug("resubmit transaction",
|
||||
"tx hash", pending.tx.Hash().String(),
|
||||
"submit block number", pending.submitAt,
|
||||
"current block number", number,
|
||||
"escalateBlocks", s.config.EscalateBlocks)
|
||||
|
||||
var tx *types.Transaction
|
||||
tx, err := s.resubmitTransaction(pending.feeData, pending.signer, pending.tx)
|
||||
if err != nil {
|
||||
@@ -448,6 +503,22 @@ func (s *Sender) checkPendingTransaction(header *types.Header, confirmed uint64)
|
||||
}
|
||||
}
|
||||
|
||||
// checkBalance checks the balance and prints an error log if the balance is under a threshold.
|
||||
func (s *Sender) checkBalance(ctx context.Context) error {
|
||||
bls, err := s.client.BalanceAt(ctx, s.auth.From, nil)
|
||||
if err != nil {
|
||||
log.Warn("failed to get balance", "address", s.auth.From.String(), "err", err)
|
||||
return err
|
||||
}
|
||||
|
||||
if bls.Cmp(s.minBalance) < 0 {
|
||||
return fmt.Errorf("insufficient account balance - actual balance: %s, minimum required balance: %s, address: %s",
|
||||
bls.String(), s.minBalance.String(), s.auth.From.String())
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// loop is the main event loop
|
||||
func (s *Sender) loop(ctx context.Context) {
|
||||
checkTick := time.NewTicker(time.Duration(s.config.CheckPendingTime) * time.Second)
|
||||
@@ -459,6 +530,7 @@ func (s *Sender) loop(ctx context.Context) {
|
||||
for {
|
||||
select {
|
||||
case <-checkTick.C:
|
||||
s.metrics.senderCheckPendingTransactionTotal.WithLabelValues(s.service, s.name).Inc()
|
||||
header, err := s.client.HeaderByNumber(s.ctx, nil)
|
||||
if err != nil {
|
||||
log.Error("failed to get latest head", "err", err)
|
||||
@@ -473,8 +545,11 @@ func (s *Sender) loop(ctx context.Context) {
|
||||
|
||||
s.checkPendingTransaction(header, confirmed)
|
||||
case <-checkBalanceTicker.C:
|
||||
s.metrics.senderCheckBalancerTotal.WithLabelValues(s.service, s.name).Inc()
|
||||
// Check and set balance.
|
||||
_ = s.auths.checkAndSetBalances(ctx)
|
||||
if err := s.checkBalance(ctx); err != nil {
|
||||
log.Error("check balance error", "err", err)
|
||||
}
|
||||
case <-ctx.Done():
|
||||
return
|
||||
case <-s.stopCh:
|
||||
|
||||
@@ -7,10 +7,8 @@ import (
|
||||
"math/big"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/agiledragon/gomonkey/v2"
|
||||
cmap "github.com/orcaman/concurrent-map"
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
@@ -18,7 +16,6 @@ import (
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
"github.com/stretchr/testify/assert"
|
||||
"golang.org/x/sync/errgroup"
|
||||
|
||||
"scroll-tech/common/docker"
|
||||
|
||||
@@ -28,10 +25,10 @@ import (
|
||||
const TXBatch = 50
|
||||
|
||||
var (
|
||||
privateKeys []*ecdsa.PrivateKey
|
||||
cfg *config.Config
|
||||
base *docker.App
|
||||
txTypes = []string{"LegacyTx", "AccessListTx", "DynamicFeeTx"}
|
||||
privateKey *ecdsa.PrivateKey
|
||||
cfg *config.Config
|
||||
base *docker.App
|
||||
txTypes = []string{"LegacyTx", "AccessListTx", "DynamicFeeTx"}
|
||||
)
|
||||
|
||||
func TestMain(m *testing.M) {
|
||||
@@ -50,7 +47,7 @@ func setupEnv(t *testing.T) {
|
||||
priv, err := crypto.HexToECDSA("1212121212121212121212121212121212121212121212121212121212121212")
|
||||
assert.NoError(t, err)
|
||||
// Load default private key.
|
||||
privateKeys = []*ecdsa.PrivateKey{priv}
|
||||
privateKey = priv
|
||||
|
||||
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
|
||||
cfg.L1Config.RelayerConfig.SenderConfig.CheckBalanceTime = 1
|
||||
@@ -62,15 +59,10 @@ func TestSender(t *testing.T) {
|
||||
|
||||
t.Run("test new sender", testNewSender)
|
||||
t.Run("test pending limit", testPendLimit)
|
||||
|
||||
t.Run("test min gas limit", testMinGasLimit)
|
||||
t.Run("test resubmit transaction", testResubmitTransaction)
|
||||
t.Run("test resubmit transaction with rising base fee", testResubmitTransactionWithRisingBaseFee)
|
||||
t.Run("test check pending transaction", testCheckPendingTransaction)
|
||||
|
||||
t.Run("test 1 account sender", func(t *testing.T) { testBatchSender(t, 1) })
|
||||
t.Run("test 3 account sender", func(t *testing.T) { testBatchSender(t, 3) })
|
||||
t.Run("test 8 account sender", func(t *testing.T) { testBatchSender(t, 8) })
|
||||
}
|
||||
|
||||
func testNewSender(t *testing.T) {
|
||||
@@ -78,7 +70,7 @@ func testNewSender(t *testing.T) {
|
||||
// exit by Stop()
|
||||
cfgCopy1 := *cfg.L1Config.RelayerConfig.SenderConfig
|
||||
cfgCopy1.TxType = txType
|
||||
newSender1, err := NewSender(context.Background(), &cfgCopy1, privateKeys)
|
||||
newSender1, err := NewSender(context.Background(), &cfgCopy1, privateKey, "test", "test", nil)
|
||||
assert.NoError(t, err)
|
||||
newSender1.Stop()
|
||||
|
||||
@@ -86,7 +78,7 @@ func testNewSender(t *testing.T) {
|
||||
cfgCopy2 := *cfg.L1Config.RelayerConfig.SenderConfig
|
||||
cfgCopy2.TxType = txType
|
||||
subCtx, cancel := context.WithCancel(context.Background())
|
||||
_, err = NewSender(subCtx, &cfgCopy2, privateKeys)
|
||||
_, err = NewSender(subCtx, &cfgCopy2, privateKey, "test", "test", nil)
|
||||
assert.NoError(t, err)
|
||||
cancel()
|
||||
}
|
||||
@@ -98,7 +90,7 @@ func testPendLimit(t *testing.T) {
|
||||
cfgCopy.TxType = txType
|
||||
cfgCopy.Confirmations = rpc.LatestBlockNumber
|
||||
cfgCopy.PendingLimit = 2
|
||||
newSender, err := NewSender(context.Background(), &cfgCopy, privateKeys)
|
||||
newSender, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
for i := 0; i < 2*newSender.PendingLimit(); i++ {
|
||||
@@ -115,7 +107,7 @@ func testMinGasLimit(t *testing.T) {
|
||||
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
|
||||
cfgCopy.TxType = txType
|
||||
cfgCopy.Confirmations = rpc.LatestBlockNumber
|
||||
newSender, err := NewSender(context.Background(), &cfgCopy, privateKeys)
|
||||
newSender, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
client, err := ethclient.Dial(cfgCopy.Endpoint)
|
||||
@@ -143,13 +135,12 @@ func testResubmitTransaction(t *testing.T) {
|
||||
for _, txType := range txTypes {
|
||||
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
|
||||
cfgCopy.TxType = txType
|
||||
s, err := NewSender(context.Background(), &cfgCopy, privateKeys)
|
||||
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", nil)
|
||||
assert.NoError(t, err)
|
||||
auth := s.auths.getAccount()
|
||||
tx := types.NewTransaction(auth.Nonce.Uint64(), common.Address{}, big.NewInt(0), 0, big.NewInt(0), nil)
|
||||
feeData, err := s.getFeeData(auth, &common.Address{}, big.NewInt(0), nil, 0)
|
||||
tx := types.NewTransaction(s.auth.Nonce.Uint64(), common.Address{}, big.NewInt(0), 0, big.NewInt(0), nil)
|
||||
feeData, err := s.getFeeData(s.auth, &common.Address{}, big.NewInt(0), nil, 0)
|
||||
assert.NoError(t, err)
|
||||
_, err = s.resubmitTransaction(feeData, auth, tx)
|
||||
_, err = s.resubmitTransaction(feeData, s.auth, tx)
|
||||
assert.NoError(t, err)
|
||||
s.Stop()
|
||||
}
|
||||
@@ -160,17 +151,16 @@ func testResubmitTransactionWithRisingBaseFee(t *testing.T) {
|
||||
|
||||
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
|
||||
cfgCopy.TxType = txType
|
||||
s, err := NewSender(context.Background(), &cfgCopy, privateKeys)
|
||||
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", nil)
|
||||
assert.NoError(t, err)
|
||||
auth := s.auths.getAccount()
|
||||
tx := types.NewTransaction(auth.Nonce.Uint64(), common.Address{}, big.NewInt(0), 0, big.NewInt(0), nil)
|
||||
tx := types.NewTransaction(s.auth.Nonce.Uint64(), common.Address{}, big.NewInt(0), 0, big.NewInt(0), nil)
|
||||
s.baseFeePerGas = 1000
|
||||
feeData, err := s.getFeeData(auth, &common.Address{}, big.NewInt(0), nil, 0)
|
||||
feeData, err := s.getFeeData(s.auth, &common.Address{}, big.NewInt(0), nil, 0)
|
||||
assert.NoError(t, err)
|
||||
// bump the basefee by 10x
|
||||
s.baseFeePerGas *= 10
|
||||
// resubmit and check that the gas fee has been adjusted accordingly
|
||||
newTx, err := s.resubmitTransaction(feeData, auth, tx)
|
||||
newTx, err := s.resubmitTransaction(feeData, s.auth, tx)
|
||||
assert.NoError(t, err)
|
||||
|
||||
escalateMultipleNum := new(big.Int).SetUint64(s.config.EscalateMultipleNum)
|
||||
@@ -196,14 +186,13 @@ func testCheckPendingTransaction(t *testing.T) {
|
||||
for _, txType := range txTypes {
|
||||
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
|
||||
cfgCopy.TxType = txType
|
||||
s, err := NewSender(context.Background(), &cfgCopy, privateKeys)
|
||||
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
header := &types.Header{Number: big.NewInt(100), BaseFee: big.NewInt(100)}
|
||||
confirmed := uint64(100)
|
||||
receipt := &types.Receipt{Status: types.ReceiptStatusSuccessful, BlockNumber: big.NewInt(90)}
|
||||
auth := s.auths.getAccount()
|
||||
tx := types.NewTransaction(auth.Nonce.Uint64(), common.Address{}, big.NewInt(0), 0, big.NewInt(0), nil)
|
||||
tx := types.NewTransaction(s.auth.Nonce.Uint64(), common.Address{}, big.NewInt(0), 0, big.NewInt(0), nil)
|
||||
|
||||
testCases := []struct {
|
||||
name string
|
||||
@@ -280,66 +269,3 @@ func testCheckPendingTransaction(t *testing.T) {
|
||||
s.Stop()
|
||||
}
|
||||
}
|
||||
|
||||
func testBatchSender(t *testing.T, batchSize int) {
|
||||
for _, txType := range txTypes {
|
||||
for len(privateKeys) < batchSize {
|
||||
priv, err := crypto.GenerateKey()
|
||||
assert.NoError(t, err)
|
||||
privateKeys = append(privateKeys, priv)
|
||||
}
|
||||
|
||||
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
|
||||
cfgCopy.Confirmations = rpc.LatestBlockNumber
|
||||
cfgCopy.PendingLimit = batchSize * TXBatch
|
||||
cfgCopy.TxType = txType
|
||||
newSender, err := NewSender(context.Background(), &cfgCopy, privateKeys)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// send transactions
|
||||
var (
|
||||
eg errgroup.Group
|
||||
idCache = cmap.New()
|
||||
confirmCh = newSender.ConfirmChan()
|
||||
)
|
||||
for idx := 0; idx < newSender.NumberOfAccounts(); idx++ {
|
||||
index := idx
|
||||
eg.Go(func() error {
|
||||
for i := 0; i < TXBatch; i++ {
|
||||
toAddr := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
|
||||
id := strconv.Itoa(i + index*1000)
|
||||
_, err := newSender.SendTransaction(id, &toAddr, big.NewInt(1), nil, 0)
|
||||
if errors.Is(err, ErrNoAvailableAccount) || errors.Is(err, ErrFullPending) {
|
||||
<-time.After(time.Second)
|
||||
continue
|
||||
}
|
||||
assert.NoError(t, err)
|
||||
idCache.Set(id, struct{}{})
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
assert.NoError(t, eg.Wait())
|
||||
t.Logf("successful send batch txs, batch size: %d, total count: %d", newSender.NumberOfAccounts(), TXBatch*newSender.NumberOfAccounts())
|
||||
|
||||
// avoid waiting 10 mins, which would make the testcase panic
|
||||
after := time.After(80 * time.Second)
|
||||
isDone := false
|
||||
for !isDone {
|
||||
select {
|
||||
case cmsg := <-confirmCh:
|
||||
assert.Equal(t, true, cmsg.IsSuccessful)
|
||||
_, exist := idCache.Pop(cmsg.ID)
|
||||
assert.Equal(t, true, exist)
|
||||
// All confirmed txs have been received.
|
||||
if idCache.Count() == 0 {
|
||||
isDone = true
|
||||
}
|
||||
case <-after:
|
||||
t.Error("newSender test failed because of timeout")
|
||||
isDone = true
|
||||
}
|
||||
}
|
||||
newSender.Stop()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -5,6 +5,8 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"gorm.io/gorm"
|
||||
|
||||
@@ -26,13 +28,28 @@ type BatchProposer struct {
|
||||
maxChunkNumPerBatch uint64
|
||||
maxL1CommitGasPerBatch uint64
|
||||
maxL1CommitCalldataSizePerBatch uint32
|
||||
minChunkNumPerBatch uint64
|
||||
batchTimeoutSec uint64
|
||||
gasCostIncreaseMultiplier float64
|
||||
|
||||
batchProposerCircleTotal prometheus.Counter
|
||||
proposeBatchFailureTotal prometheus.Counter
|
||||
proposeBatchUpdateInfoTotal prometheus.Counter
|
||||
proposeBatchUpdateInfoFailureTotal prometheus.Counter
|
||||
totalL1CommitGas prometheus.Gauge
|
||||
totalL1CommitCalldataSize prometheus.Gauge
|
||||
batchChunksNum prometheus.Gauge
|
||||
batchFirstBlockTimeoutReached prometheus.Counter
|
||||
batchChunksProposeNotEnoughTotal prometheus.Counter
|
||||
}
|
||||
|
||||
// NewBatchProposer creates a new BatchProposer instance.
|
||||
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, db *gorm.DB) *BatchProposer {
|
||||
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, db *gorm.DB, reg prometheus.Registerer) *BatchProposer {
|
||||
log.Debug("new batch proposer",
|
||||
"maxChunkNumPerBatch", cfg.MaxChunkNumPerBatch,
|
||||
"maxL1CommitGasPerBatch", cfg.MaxL1CommitGasPerBatch,
|
||||
"maxL1CommitCalldataSizePerBatch", cfg.MaxL1CommitCalldataSizePerBatch,
|
||||
"batchTimeoutSec", cfg.BatchTimeoutSec)
|
||||
|
||||
return &BatchProposer{
|
||||
ctx: ctx,
|
||||
db: db,
|
||||
@@ -42,25 +59,65 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, db *
|
||||
maxChunkNumPerBatch: cfg.MaxChunkNumPerBatch,
|
||||
maxL1CommitGasPerBatch: cfg.MaxL1CommitGasPerBatch,
|
||||
maxL1CommitCalldataSizePerBatch: cfg.MaxL1CommitCalldataSizePerBatch,
|
||||
minChunkNumPerBatch: cfg.MinChunkNumPerBatch,
|
||||
batchTimeoutSec: cfg.BatchTimeoutSec,
|
||||
gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
|
||||
|
||||
batchProposerCircleTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_propose_batch_circle_total",
|
||||
Help: "Total number of propose batch total.",
|
||||
}),
|
||||
proposeBatchFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_propose_batch_failure_circle_total",
|
||||
Help: "Total number of propose batch total.",
|
||||
}),
|
||||
proposeBatchUpdateInfoTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_propose_batch_update_info_total",
|
||||
Help: "Total number of propose batch update info total.",
|
||||
}),
|
||||
proposeBatchUpdateInfoFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_propose_batch_update_info_failure_total",
|
||||
Help: "Total number of propose batch update info failure total.",
|
||||
}),
|
||||
totalL1CommitGas: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "bridge_propose_batch_total_l1_commit_gas",
|
||||
Help: "The total l1 commit gas",
|
||||
}),
|
||||
totalL1CommitCalldataSize: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "bridge_propose_batch_total_l1_call_data_size",
|
||||
Help: "The total l1 call data size",
|
||||
}),
|
||||
batchChunksNum: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "bridge_propose_batch_chunks_number",
|
||||
Help: "The number of chunks in the batch",
|
||||
}),
|
||||
batchFirstBlockTimeoutReached: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_propose_batch_first_block_timeout_reached_total",
|
||||
Help: "Total times of batch's first block timeout reached",
|
||||
}),
|
||||
batchChunksProposeNotEnoughTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_propose_batch_chunks_propose_not_enough_total",
|
||||
Help: "Total number of batch chunk propose not enough",
|
||||
}),
|
||||
}
|
||||
}
|
||||
|
||||
// TryProposeBatch tries to propose a new batch.
|
||||
func (p *BatchProposer) TryProposeBatch() {
|
||||
dbChunks, err := p.proposeBatchChunks()
|
||||
p.batchProposerCircleTotal.Inc()
|
||||
dbChunks, batchMeta, err := p.proposeBatchChunks()
|
||||
if err != nil {
|
||||
p.proposeBatchFailureTotal.Inc()
|
||||
log.Error("proposeBatchChunks failed", "err", err)
|
||||
return
|
||||
}
|
||||
if err := p.updateBatchInfoInDB(dbChunks); err != nil {
|
||||
if err := p.updateBatchInfoInDB(dbChunks, batchMeta); err != nil {
|
||||
p.proposeBatchUpdateInfoFailureTotal.Inc()
|
||||
log.Error("update batch info in db failed", "err", err)
|
||||
}
|
||||
}
|
||||
|
||||
func (p *BatchProposer) updateBatchInfoInDB(dbChunks []*orm.Chunk) error {
|
||||
func (p *BatchProposer) updateBatchInfoInDB(dbChunks []*orm.Chunk, batchMeta *types.BatchMeta) error {
|
||||
p.proposeBatchUpdateInfoTotal.Inc()
|
||||
numChunks := len(dbChunks)
|
||||
if numChunks <= 0 {
|
||||
return nil
|
||||
@@ -70,17 +127,20 @@ func (p *BatchProposer) updateBatchInfoInDB(dbChunks []*orm.Chunk) error {
|
||||
return err
|
||||
}
|
||||
|
||||
startChunkIndex := dbChunks[0].Index
|
||||
startChunkHash := dbChunks[0].Hash
|
||||
endChunkIndex := dbChunks[numChunks-1].Index
|
||||
endChunkHash := dbChunks[numChunks-1].Hash
|
||||
batchMeta.StartChunkIndex = dbChunks[0].Index
|
||||
batchMeta.StartChunkHash = dbChunks[0].Hash
|
||||
batchMeta.EndChunkIndex = dbChunks[numChunks-1].Index
|
||||
batchMeta.EndChunkHash = dbChunks[numChunks-1].Hash
|
||||
err = p.db.Transaction(func(dbTX *gorm.DB) error {
|
||||
batch, dbErr := p.batchOrm.InsertBatch(p.ctx, startChunkIndex, endChunkIndex, startChunkHash, endChunkHash, chunks, dbTX)
|
||||
batch, dbErr := p.batchOrm.InsertBatch(p.ctx, chunks, batchMeta, dbTX)
|
||||
if dbErr != nil {
|
||||
log.Warn("BatchProposer.updateBatchInfoInDB insert batch failure",
|
||||
"start chunk index", batchMeta.StartChunkIndex, "end chunk index", batchMeta.EndChunkIndex, "error", dbErr)
|
||||
return dbErr
|
||||
}
|
||||
dbErr = p.chunkOrm.UpdateBatchHashInRange(p.ctx, startChunkIndex, endChunkIndex, batch.Hash, dbTX)
|
||||
dbErr = p.chunkOrm.UpdateBatchHashInRange(p.ctx, batchMeta.StartChunkIndex, batchMeta.EndChunkIndex, batch.Hash, dbTX)
|
||||
if dbErr != nil {
|
||||
log.Warn("BatchProposer.UpdateBatchHashInRange update the chunk's batch hash failure", "hash", batch.Hash, "error", dbErr)
|
||||
return dbErr
|
||||
}
|
||||
return nil
|
||||
@@ -88,93 +148,106 @@ func (p *BatchProposer) updateBatchInfoInDB(dbChunks []*orm.Chunk) error {
|
||||
return err
|
||||
}
|
||||
|
||||
func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
|
||||
dbChunks, err := p.chunkOrm.GetUnbatchedChunks(p.ctx)
|
||||
func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, *types.BatchMeta, error) {
|
||||
unbatchedChunkIndex, err := p.batchOrm.GetFirstUnbatchedChunkIndex(p.ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
dbChunks, err := p.chunkOrm.GetChunksGEIndex(p.ctx, unbatchedChunkIndex, int(p.maxChunkNumPerBatch)+1)
|
||||
if err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
if len(dbChunks) == 0 {
|
||||
return nil, nil
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
firstChunk := dbChunks[0]
|
||||
totalL1CommitCalldataSize := firstChunk.TotalL1CommitCalldataSize
|
||||
totalL1CommitGas := firstChunk.TotalL1CommitGas
|
||||
totalChunks := uint64(1)
|
||||
totalL1MessagePopped := firstChunk.TotalL1MessagesPoppedBefore + uint64(firstChunk.TotalL1MessagesPoppedInChunk)
|
||||
var totalL1CommitCalldataSize uint32
|
||||
var totalL1CommitGas uint64
|
||||
var totalChunks uint64
|
||||
var totalL1MessagePopped uint64
|
||||
var batchMeta types.BatchMeta
|
||||
|
||||
parentBatch, err := p.batchOrm.GetLatestBatch(p.ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
getKeccakGas := func(size uint64) uint64 {
|
||||
return 30 + 6*((size+31)/32) // 30 + 6 * ceil(size / 32)
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
// Add extra gas costs
|
||||
totalL1CommitGas += 4 * 2100 // 4 one-time cold sload for commitBatch
|
||||
totalL1CommitGas += 20000 // 1 time sstore
|
||||
totalL1CommitGas += 16 // version in calldata
|
||||
totalL1CommitGas += 16 * (32 * (totalL1MessagePopped + 255) / 256) // _skippedL1MessageBitmap in calldata
|
||||
totalL1CommitGas += 4 * 2100 // 4 one-time cold sload for commitBatch
|
||||
totalL1CommitGas += 20000 // 1 time sstore
|
||||
totalL1CommitGas += types.CalldataNonZeroByteGas // version in calldata
|
||||
|
||||
// adjusting gas:
|
||||
// add 1 time cold sload (2100 gas) for L1MessageQueue
|
||||
// add 1 time cold address access (2600 gas) for L1MessageQueue
|
||||
// minus 1 time warm sload (100 gas) & 1 time warm address access (100 gas)
|
||||
totalL1CommitGas += (2100 + 2600 - 100 - 100)
|
||||
totalL1CommitGas += getKeccakGas(32 * totalChunks) // batch data hash
|
||||
if parentBatch != nil {
|
||||
totalL1CommitGas += getKeccakGas(uint64(len(parentBatch.BatchHeader))) // parent batch header hash
|
||||
totalL1CommitGas += 16 * uint64(len(parentBatch.BatchHeader)) // parent batch header in calldata
|
||||
}
|
||||
// batch header size: 89 + 32 * ceil(l1MessagePopped / 256)
|
||||
totalL1CommitGas += getKeccakGas(89 + 32*(totalL1MessagePopped+255)/256)
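The commit-gas bookkeeping above, and in the per-chunk loop below, rests on two small formulas: a keccak256 hash over `size` bytes is estimated at 30 + 6*ceil(size/32) gas (types.GetKeccak256Gas in the new code), and the _skippedL1MessageBitmap occupies 32 bytes of calldata per 256 popped L1 messages, each byte priced at types.CalldataNonZeroByteGas. A standalone sketch with illustrative numbers, not taken from the diff:

```go
package main

import "fmt"

// keccakGas estimates keccak256 cost: 30 + 6 * ceil(size/32).
func keccakGas(size uint64) uint64 {
	return 30 + 6*((size+31)/32)
}

// bitmapBytes is the size of the skipped-L1-message bitmap:
// 32 bytes per 256 popped messages.
func bitmapBytes(l1MessagePopped uint64) uint64 {
	return 32 * ((l1MessagePopped + 255) / 256)
}

func main() {
	fmt.Println(keccakGas(89))      // minimal batch header, no bitmap: 48 gas
	fmt.Println(bitmapBytes(300))   // 300 popped messages -> 64 bytes of bitmap
	fmt.Println(keccakGas(89 + 64)) // header hash cost with that bitmap: 60 gas
}
```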
|
||||
|
||||
// Check if the first chunk breaks hard limits.
|
||||
// If so, it indicates there are bugs in chunk-proposer, manual fix is needed.
|
||||
if p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerBatch) {
|
||||
return nil, fmt.Errorf(
|
||||
"the first chunk exceeds l1 commit gas limit; start block number: %v, end block number: %v, commit gas: %v, max commit gas limit: %v",
|
||||
firstChunk.StartBlockNumber,
|
||||
firstChunk.EndBlockNumber,
|
||||
totalL1CommitGas,
|
||||
p.maxL1CommitGasPerBatch,
|
||||
)
|
||||
totalL1CommitGas += types.GetKeccak256Gas(uint64(len(parentBatch.BatchHeader))) // parent batch header hash
|
||||
totalL1CommitGas += types.CalldataNonZeroByteGas * uint64(len(parentBatch.BatchHeader)) // parent batch header in calldata
|
||||
}
|
||||
|
||||
if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch {
|
||||
return nil, fmt.Errorf(
|
||||
"the first chunk exceeds l1 commit calldata size limit; start block number: %v, end block number %v, calldata size: %v, max calldata size limit: %v",
|
||||
firstChunk.StartBlockNumber,
|
||||
firstChunk.EndBlockNumber,
|
||||
totalL1CommitCalldataSize,
|
||||
p.maxL1CommitCalldataSizePerBatch,
|
||||
)
|
||||
}
|
||||
for i, chunk := range dbChunks {
|
||||
// metric values
|
||||
batchMeta.TotalL1CommitGas = totalL1CommitGas
|
||||
batchMeta.TotalL1CommitCalldataSize = totalL1CommitCalldataSize
|
||||
|
||||
for i, chunk := range dbChunks[1:] {
|
||||
totalL1CommitCalldataSize += chunk.TotalL1CommitCalldataSize
|
||||
totalL1CommitGas += chunk.TotalL1CommitGas
|
||||
// adjust batch data hash gas cost
|
||||
totalL1CommitGas -= getKeccakGas(32 * totalChunks)
|
||||
totalL1CommitGas -= types.GetKeccak256Gas(32 * totalChunks)
|
||||
totalChunks++
|
||||
totalL1CommitGas += getKeccakGas(32 * totalChunks)
|
||||
// adjust batch header hash gas cost
|
||||
totalL1CommitGas -= getKeccakGas(89 + 32*(totalL1MessagePopped+255)/256)
|
||||
totalL1CommitGas -= 16 * (32 * (totalL1MessagePopped + 255) / 256)
|
||||
totalL1CommitGas += types.GetKeccak256Gas(32 * totalChunks)
|
||||
// adjust batch header hash gas cost, batch header size: 89 + 32 * ceil(l1MessagePopped / 256)
|
||||
totalL1CommitGas -= types.GetKeccak256Gas(89 + 32*(totalL1MessagePopped+255)/256)
|
||||
totalL1CommitGas -= types.CalldataNonZeroByteGas * (32 * (totalL1MessagePopped + 255) / 256)
|
||||
totalL1MessagePopped += uint64(chunk.TotalL1MessagesPoppedInChunk)
|
||||
totalL1CommitGas += 16 * (32 * (totalL1MessagePopped + 255) / 256)
|
||||
totalL1CommitGas += getKeccakGas(89 + 32*(totalL1MessagePopped+255)/256)
|
||||
totalL1CommitGas += types.CalldataNonZeroByteGas * (32 * (totalL1MessagePopped + 255) / 256)
|
||||
totalL1CommitGas += types.GetKeccak256Gas(89 + 32*(totalL1MessagePopped+255)/256)
|
||||
totalOverEstimateL1CommitGas := uint64(p.gasCostIncreaseMultiplier * float64(totalL1CommitGas))
|
||||
if totalChunks > p.maxChunkNumPerBatch ||
|
||||
totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch ||
|
||||
p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerBatch) {
|
||||
return dbChunks[:i+1], nil
|
||||
totalOverEstimateL1CommitGas > p.maxL1CommitGasPerBatch {
|
||||
// Check if the first chunk breaks hard limits.
|
||||
// If so, it indicates there are bugs in chunk-proposer, manual fix is needed.
|
||||
if i == 0 {
|
||||
if totalOverEstimateL1CommitGas > p.maxL1CommitGasPerBatch {
|
||||
return nil, nil, fmt.Errorf(
|
||||
"the first chunk exceeds l1 commit gas limit; start block number: %v, end block number: %v, commit gas: %v, max commit gas limit: %v",
|
||||
dbChunks[0].StartBlockNumber,
|
||||
dbChunks[0].EndBlockNumber,
|
||||
totalL1CommitGas,
|
||||
p.maxL1CommitGasPerBatch,
|
||||
)
|
||||
}
|
||||
if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch {
|
||||
return nil, nil, fmt.Errorf(
|
||||
"the first chunk exceeds l1 commit calldata size limit; start block number: %v, end block number %v, calldata size: %v, max calldata size limit: %v",
|
||||
dbChunks[0].StartBlockNumber,
|
||||
dbChunks[0].EndBlockNumber,
|
||||
totalL1CommitCalldataSize,
|
||||
p.maxL1CommitCalldataSizePerBatch,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
log.Debug("breaking limit condition in batching",
|
||||
"currentTotalChunks", totalChunks,
|
||||
"maxChunkNumPerBatch", p.maxChunkNumPerBatch,
|
||||
"currentL1CommitCalldataSize", totalL1CommitCalldataSize,
|
||||
"maxL1CommitCalldataSizePerBatch", p.maxL1CommitCalldataSizePerBatch,
|
||||
"currentOverEstimateL1CommitGas", totalOverEstimateL1CommitGas,
|
||||
"maxL1CommitGasPerBatch", p.maxL1CommitGasPerBatch)
|
||||
|
||||
p.totalL1CommitGas.Set(float64(batchMeta.TotalL1CommitGas))
|
||||
p.totalL1CommitCalldataSize.Set(float64(batchMeta.TotalL1CommitCalldataSize))
|
||||
p.batchChunksNum.Set(float64(i))
|
||||
return dbChunks[:i], &batchMeta, nil
|
||||
}
|
||||
}
|
||||
|
||||
var hasChunkTimeout bool
|
||||
currentTimeSec := uint64(time.Now().Unix())
|
||||
if dbChunks[0].StartBlockTime+p.batchTimeoutSec < currentTimeSec {
|
||||
log.Warn("first block timeout",
|
||||
@@ -182,16 +255,18 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
|
||||
"first block timestamp", dbChunks[0].StartBlockTime,
|
||||
"chunk outdated time threshold", currentTimeSec,
|
||||
)
|
||||
hasChunkTimeout = true
|
||||
batchMeta.TotalL1CommitGas = totalL1CommitGas
|
||||
batchMeta.TotalL1CommitCalldataSize = totalL1CommitCalldataSize
|
||||
p.batchFirstBlockTimeoutReached.Inc()
|
||||
p.totalL1CommitGas.Set(float64(batchMeta.TotalL1CommitGas))
|
||||
p.totalL1CommitCalldataSize.Set(float64(batchMeta.TotalL1CommitCalldataSize))
|
||||
p.batchChunksNum.Set(float64(len(dbChunks)))
|
||||
return dbChunks, &batchMeta, nil
|
||||
}
|
||||
|
||||
if !hasChunkTimeout && uint64(len(dbChunks)) < p.minChunkNumPerBatch {
|
||||
log.Warn("The chunk number of the batch is less than the minimum limit",
|
||||
"chunk num", len(dbChunks), "minChunkNumPerBatch", p.minChunkNumPerBatch,
|
||||
)
|
||||
return nil, nil
|
||||
}
|
||||
return dbChunks, nil
|
||||
log.Debug("pending chunks do not reach one of the constraints or contain a timeout block")
|
||||
p.batchChunksProposeNotEnoughTotal.Inc()
|
||||
return nil, nil, nil
|
||||
}
|
||||
|
||||
func (p *BatchProposer) dbChunksToBridgeChunks(dbChunks []*orm.Chunk) ([]*types.Chunk, error) {
|
||||
|
||||
@@ -23,30 +23,22 @@ func testBatchProposer(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
|
||||
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
|
||||
MaxTxGasPerChunk: 1000000000,
|
||||
MaxL2TxNumPerChunk: 10000,
|
||||
MaxTxNumPerChunk: 10000,
|
||||
MaxL1CommitGasPerChunk: 50000000000,
|
||||
MaxL1CommitCalldataSizePerChunk: 1000000,
|
||||
MinL1CommitCalldataSizePerChunk: 0,
|
||||
MaxRowConsumptionPerChunk: 1048319,
|
||||
ChunkTimeoutSec: 300,
|
||||
}, db)
|
||||
}, db, nil)
|
||||
cp.TryProposeChunk()
|
||||
|
||||
bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
|
||||
MaxChunkNumPerBatch: 10,
|
||||
MaxL1CommitGasPerBatch: 50000000000,
|
||||
MaxL1CommitCalldataSizePerBatch: 1000000,
|
||||
MinChunkNumPerBatch: 1,
|
||||
BatchTimeoutSec: 300,
|
||||
}, db)
|
||||
}, db, nil)
|
||||
bp.TryProposeBatch()
|
||||
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
chunks, err := chunkOrm.GetUnbatchedChunks(context.Background())
|
||||
assert.NoError(t, err)
|
||||
assert.Empty(t, chunks)
|
||||
|
||||
batchOrm := orm.NewBatch(db)
|
||||
// get all batches.
|
||||
batches, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{}, []string{}, 0)
|
||||
@@ -57,6 +49,7 @@ func testBatchProposer(t *testing.T) {
|
||||
assert.Equal(t, types.RollupPending, types.RollupStatus(batches[0].RollupStatus))
|
||||
assert.Equal(t, types.ProvingTaskUnassigned, types.ProvingStatus(batches[0].ProvingStatus))
|
||||
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
dbChunks, err := chunkOrm.GetChunksInRange(context.Background(), 0, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, batches, 1)
|
||||
|
||||
@@ -6,6 +6,8 @@ import (
|
||||
"fmt"
|
||||
"time"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
"gorm.io/gorm"
|
||||
@@ -16,6 +18,10 @@ import (
|
||||
"scroll-tech/bridge/internal/orm"
|
||||
)
|
||||
|
||||
// maxNumBlockPerChunk is the maximum number of blocks we allow per chunk.
|
||||
// Normally we will pack much fewer blocks because of other limits.
|
||||
const maxNumBlockPerChunk int = 100
|
||||
|
||||
// chunkRowConsumption is map(sub-circuit name => sub-circuit row count)
|
||||
type chunkRowConsumption map[string]uint64
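The proposer below calls crc.add to accumulate per-sub-circuit row counts and crc.max to find the most constrained sub-circuit; their implementations are not part of this diff. A hypothetical sketch of what they do, using a plain map as input for illustration (the real add likely takes the go-ethereum row-consumption type):

```go
// Hypothetical helpers, shown only to illustrate how chunkRowConsumption is used.
func (crc chunkRowConsumption) addRows(rows map[string]uint64) {
	for name, n := range rows {
		crc[name] += n // accumulate rows per sub-circuit across blocks
	}
}

func (crc chunkRowConsumption) max() uint64 {
	var m uint64
	for _, v := range crc { // the busiest sub-circuit bounds the chunk size
		if v > m {
			m = v
		}
	}
	return m
}
```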
|
||||
|
||||
@@ -49,43 +55,111 @@ type ChunkProposer struct {
|
||||
chunkOrm *orm.Chunk
|
||||
l2BlockOrm *orm.L2Block
|
||||
|
||||
maxTxGasPerChunk uint64
|
||||
maxL2TxNumPerChunk uint64
|
||||
maxTxNumPerChunk uint64
|
||||
maxL1CommitGasPerChunk uint64
|
||||
maxL1CommitCalldataSizePerChunk uint64
|
||||
minL1CommitCalldataSizePerChunk uint64
|
||||
maxRowConsumptionPerChunk uint64
|
||||
chunkTimeoutSec uint64
|
||||
gasCostIncreaseMultiplier float64
|
||||
|
||||
chunkProposerCircleTotal prometheus.Counter
|
||||
proposeChunkFailureTotal prometheus.Counter
|
||||
proposeChunkUpdateInfoTotal prometheus.Counter
|
||||
proposeChunkUpdateInfoFailureTotal prometheus.Counter
|
||||
chunkTxNum prometheus.Gauge
|
||||
chunkEstimateL1CommitGas prometheus.Gauge
|
||||
totalL1CommitCalldataSize prometheus.Gauge
|
||||
totalTxGasUsed prometheus.Gauge
|
||||
maxTxConsumption prometheus.Gauge
|
||||
chunkBlocksNum prometheus.Gauge
|
||||
chunkFirstBlockTimeoutReached prometheus.Counter
|
||||
chunkBlocksProposeNotEnoughTotal prometheus.Counter
|
||||
}
|
||||
|
||||
// NewChunkProposer creates a new ChunkProposer instance.
|
||||
func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, db *gorm.DB) *ChunkProposer {
|
||||
func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, db *gorm.DB, reg prometheus.Registerer) *ChunkProposer {
|
||||
log.Debug("new chunk proposer",
|
||||
"maxTxNumPerChunk", cfg.MaxTxNumPerChunk,
|
||||
"maxL1CommitGasPerChunk", cfg.MaxL1CommitGasPerChunk,
|
||||
"maxL1CommitCalldataSizePerChunk", cfg.MaxL1CommitCalldataSizePerChunk,
|
||||
"maxRowConsumptionPerChunk", cfg.MaxRowConsumptionPerChunk,
|
||||
"chunkTimeoutSec", cfg.ChunkTimeoutSec)
|
||||
|
||||
return &ChunkProposer{
|
||||
ctx: ctx,
|
||||
db: db,
|
||||
chunkOrm: orm.NewChunk(db),
|
||||
l2BlockOrm: orm.NewL2Block(db),
|
||||
maxTxGasPerChunk: cfg.MaxTxGasPerChunk,
|
||||
maxL2TxNumPerChunk: cfg.MaxL2TxNumPerChunk,
|
||||
maxTxNumPerChunk: cfg.MaxTxNumPerChunk,
|
||||
maxL1CommitGasPerChunk: cfg.MaxL1CommitGasPerChunk,
|
||||
maxL1CommitCalldataSizePerChunk: cfg.MaxL1CommitCalldataSizePerChunk,
|
||||
minL1CommitCalldataSizePerChunk: cfg.MinL1CommitCalldataSizePerChunk,
|
||||
maxRowConsumptionPerChunk: cfg.MaxRowConsumptionPerChunk,
|
||||
chunkTimeoutSec: cfg.ChunkTimeoutSec,
|
||||
gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
|
||||
|
||||
chunkProposerCircleTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_propose_chunk_circle_total",
|
||||
Help: "Total number of propose chunk total.",
|
||||
}),
|
||||
proposeChunkFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_propose_chunk_failure_circle_total",
|
||||
Help: "Total number of propose chunk failure total.",
|
||||
}),
|
||||
proposeChunkUpdateInfoTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_propose_chunk_update_info_total",
|
||||
Help: "Total number of propose chunk update info total.",
|
||||
}),
|
||||
proposeChunkUpdateInfoFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_propose_chunk_update_info_failure_total",
|
||||
Help: "Total number of propose chunk update info failure total.",
|
||||
}),
|
||||
chunkTxNum: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "bridge_propose_chunk_tx_num",
|
||||
Help: "The chunk tx num",
|
||||
}),
|
||||
chunkEstimateL1CommitGas: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "bridge_propose_chunk_estimate_l1_commit_gas",
|
||||
Help: "The chunk estimate l1 commit gas",
|
||||
}),
|
||||
totalL1CommitCalldataSize: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "bridge_propose_chunk_total_l1_commit_call_data_size",
|
||||
Help: "The total l1 commit call data size",
|
||||
}),
|
||||
totalTxGasUsed: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "bridge_propose_chunk_total_tx_gas_used",
|
||||
Help: "The total tx gas used",
|
||||
}),
|
||||
maxTxConsumption: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "bridge_propose_chunk_max_tx_consumption",
|
||||
Help: "The max tx consumption",
|
||||
}),
|
||||
chunkBlocksNum: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "bridge_propose_chunk_chunk_block_number",
|
||||
Help: "The number of blocks in the chunk",
|
||||
}),
|
||||
chunkFirstBlockTimeoutReached: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_propose_chunk_first_block_timeout_reached_total",
|
||||
Help: "Total times of chunk's first block timeout reached",
|
||||
}),
|
||||
chunkBlocksProposeNotEnoughTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_propose_chunk_blocks_propose_not_enough_total",
|
||||
Help: "Total number of chunk block propose not enough",
|
||||
}),
|
||||
}
|
||||
}
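Both proposers now take a prometheus.Registerer. promauto.With(nil) returns a factory whose collectors are created but not registered anywhere, which is why the tests in this diff can pass nil. A sketch of how a caller might wire both proposers to one registry; the config field names below are assumptions, not confirmed by this diff:

```go
// Sketch only: assumes an L2Config exposing ChunkProposerConfig and BatchProposerConfig fields.
func newProposers(ctx context.Context, cfg *config.L2Config, db *gorm.DB) (*ChunkProposer, *BatchProposer) {
	reg := prometheus.NewRegistry() // or prometheus.DefaultRegisterer in production
	cp := NewChunkProposer(ctx, cfg.ChunkProposerConfig, db, reg)
	bp := NewBatchProposer(ctx, cfg.BatchProposerConfig, db, reg)
	return cp, bp
}
```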
|
||||
|
||||
// TryProposeChunk tries to propose a new chunk.
|
||||
func (p *ChunkProposer) TryProposeChunk() {
|
||||
p.chunkProposerCircleTotal.Inc()
|
||||
proposedChunk, err := p.proposeChunk()
|
||||
if err != nil {
|
||||
p.proposeChunkFailureTotal.Inc()
|
||||
log.Error("propose new chunk failed", "err", err)
|
||||
return
|
||||
}
|
||||
|
||||
if err := p.updateChunkInfoInDB(proposedChunk); err != nil {
|
||||
p.proposeChunkUpdateInfoFailureTotal.Inc()
|
||||
log.Error("update chunk info in orm failed", "err", err)
|
||||
}
|
||||
}
|
||||
@@ -95,9 +169,11 @@ func (p *ChunkProposer) updateChunkInfoInDB(chunk *types.Chunk) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
p.proposeChunkUpdateInfoTotal.Inc()
|
||||
err := p.db.Transaction(func(dbTX *gorm.DB) error {
|
||||
dbChunk, err := p.chunkOrm.InsertChunk(p.ctx, chunk, dbTX)
|
||||
if err != nil {
|
||||
log.Warn("ChunkProposer.InsertChunk failed", "chunk hash", chunk.Hash)
|
||||
return err
|
||||
}
|
||||
if err := p.l2BlockOrm.UpdateChunkHashInRange(p.ctx, dbChunk.StartBlockNumber, dbChunk.EndBlockNumber, dbChunk.Hash, dbTX); err != nil {
|
||||
@@ -110,7 +186,12 @@ func (p *ChunkProposer) updateChunkInfoInDB(chunk *types.Chunk) error {
|
||||
}
|
||||
|
||||
func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
|
||||
blocks, err := p.l2BlockOrm.GetUnchunkedBlocks(p.ctx)
|
||||
unchunkedBlockHeight, err := p.chunkOrm.GetUnchunkedBlockHeight(p.ctx)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
blocks, err := p.l2BlockOrm.GetL2WrappedBlocksGEHeight(p.ctx, unchunkedBlockHeight, maxNumBlockPerChunk)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -119,88 +200,98 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
chunk := &types.Chunk{Blocks: blocks[:1]}
|
||||
firstBlock := chunk.Blocks[0]
|
||||
totalTxGasUsed := firstBlock.Header.GasUsed
|
||||
totalL2TxNum := firstBlock.L2TxsNum()
|
||||
totalL1CommitCalldataSize := firstBlock.EstimateL1CommitCalldataSize()
|
||||
var chunk types.Chunk
|
||||
var totalTxGasUsed uint64
|
||||
var totalTxNum uint64
|
||||
var totalL1CommitCalldataSize uint64
|
||||
var totalL1CommitGas uint64
|
||||
crc := chunkRowConsumption{}
|
||||
totalL1CommitGas := chunk.EstimateL1CommitGas()
|
||||
|
||||
if err := crc.add(firstBlock.RowConsumption); err != nil {
|
||||
return nil, fmt.Errorf("chunk-proposer failed to update chunk row consumption: %v", err)
|
||||
}
|
||||
for i, block := range blocks {
|
||||
// metric values
|
||||
lastTotalTxNum := totalTxNum
|
||||
lastTotalL1CommitGas := totalL1CommitGas
|
||||
lastCrcMax := crc.max()
|
||||
lastTotalL1CommitCalldataSize := totalL1CommitCalldataSize
|
||||
lastTotalTxGasUsed := totalTxGasUsed
|
||||
|
||||
// Check if the first block breaks hard limits.
|
||||
// If so, it indicates there are bugs in sequencer, manual fix is needed.
|
||||
if totalL2TxNum > p.maxL2TxNumPerChunk {
|
||||
return nil, fmt.Errorf(
|
||||
"the first block exceeds l2 tx number limit; block number: %v, number of transactions: %v, max transaction number limit: %v",
|
||||
firstBlock.Header.Number,
|
||||
totalL2TxNum,
|
||||
p.maxL2TxNumPerChunk,
|
||||
)
|
||||
}
|
||||
|
||||
if p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerChunk) {
|
||||
return nil, fmt.Errorf(
|
||||
"the first block exceeds l1 commit gas limit; block number: %v, commit gas: %v, max commit gas limit: %v",
|
||||
firstBlock.Header.Number,
|
||||
totalL1CommitGas,
|
||||
p.maxL1CommitGasPerChunk,
|
||||
)
|
||||
}
|
||||
|
||||
if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk {
|
||||
return nil, fmt.Errorf(
|
||||
"the first block exceeds l1 commit calldata size limit; block number: %v, calldata size: %v, max calldata size limit: %v",
|
||||
firstBlock.Header.Number,
|
||||
totalL1CommitCalldataSize,
|
||||
p.maxL1CommitCalldataSizePerChunk,
|
||||
)
|
||||
}
|
||||
|
||||
// Check if the first block breaks any soft limits.
|
||||
if totalTxGasUsed > p.maxTxGasPerChunk {
|
||||
log.Warn(
|
||||
"The first block in chunk exceeds l2 tx gas limit",
|
||||
"block number", firstBlock.Header.Number,
|
||||
"gas used", totalTxGasUsed,
|
||||
"max gas limit", p.maxTxGasPerChunk,
|
||||
)
|
||||
}
|
||||
if max := crc.max(); max > p.maxRowConsumptionPerChunk {
|
||||
return nil, fmt.Errorf(
|
||||
"the first block exceeds row consumption limit; block number: %v, row consumption: %v, max: %v, limit: %v",
|
||||
firstBlock.Header.Number,
|
||||
crc,
|
||||
max,
|
||||
p.maxRowConsumptionPerChunk,
|
||||
)
|
||||
}
|
||||
|
||||
for _, block := range blocks[1:] {
|
||||
chunk.Blocks = append(chunk.Blocks, block)
|
||||
totalTxGasUsed += block.Header.GasUsed
|
||||
totalL2TxNum += block.L2TxsNum()
|
||||
totalTxNum += uint64(len(block.Transactions))
|
||||
totalL1CommitCalldataSize += block.EstimateL1CommitCalldataSize()
|
||||
totalL1CommitGas = chunk.EstimateL1CommitGas()
|
||||
|
||||
totalOverEstimateL1CommitGas := uint64(p.gasCostIncreaseMultiplier * float64(totalL1CommitGas))
|
||||
if err := crc.add(block.RowConsumption); err != nil {
|
||||
return nil, fmt.Errorf("chunk-proposer failed to update chunk row consumption: %v", err)
|
||||
}
|
||||
crcMax := crc.max()
|
||||
|
||||
if totalTxGasUsed > p.maxTxGasPerChunk ||
|
||||
totalL2TxNum > p.maxL2TxNumPerChunk ||
|
||||
if totalTxNum > p.maxTxNumPerChunk ||
|
||||
totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk ||
|
||||
p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerChunk) ||
|
||||
crc.max() > p.maxRowConsumptionPerChunk {
|
||||
chunk.Blocks = chunk.Blocks[:len(chunk.Blocks)-1] // remove the last block from chunk
|
||||
break
|
||||
totalOverEstimateL1CommitGas > p.maxL1CommitGasPerChunk ||
|
||||
crcMax > p.maxRowConsumptionPerChunk {
|
||||
// Check if the first block breaks hard limits.
|
||||
// If so, it indicates there are bugs in sequencer, manual fix is needed.
|
||||
if i == 0 {
|
||||
if totalTxNum > p.maxTxNumPerChunk {
|
||||
return nil, fmt.Errorf(
|
||||
"the first block exceeds l2 tx number limit; block number: %v, number of transactions: %v, max transaction number limit: %v",
|
||||
block.Header.Number,
|
||||
totalTxNum,
|
||||
p.maxTxNumPerChunk,
|
||||
)
|
||||
}
|
||||
|
||||
if totalOverEstimateL1CommitGas > p.maxL1CommitGasPerChunk {
|
||||
return nil, fmt.Errorf(
|
||||
"the first block exceeds l1 commit gas limit; block number: %v, commit gas: %v, max commit gas limit: %v",
|
||||
block.Header.Number,
|
||||
totalL1CommitGas,
|
||||
p.maxL1CommitGasPerChunk,
|
||||
)
|
||||
}
|
||||
|
||||
if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk {
|
||||
return nil, fmt.Errorf(
|
||||
"the first block exceeds l1 commit calldata size limit; block number: %v, calldata size: %v, max calldata size limit: %v",
|
||||
block.Header.Number,
|
||||
totalL1CommitCalldataSize,
|
||||
p.maxL1CommitCalldataSizePerChunk,
|
||||
)
|
||||
}
|
||||
|
||||
if crcMax > p.maxRowConsumptionPerChunk {
|
||||
return nil, fmt.Errorf(
|
||||
"the first block exceeds row consumption limit; block number: %v, row consumption: %v, max: %v, limit: %v",
|
||||
block.Header.Number,
|
||||
crc,
|
||||
crcMax,
|
||||
p.maxRowConsumptionPerChunk,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
log.Debug("breaking limit condition in chunking",
|
||||
"totalTxNum", totalTxNum,
|
||||
"maxTxNumPerChunk", p.maxTxNumPerChunk,
|
||||
"currentL1CommitCalldataSize", totalL1CommitCalldataSize,
|
||||
"maxL1CommitCalldataSizePerChunk", p.maxL1CommitCalldataSizePerChunk,
|
||||
"currentOverEstimateL1CommitGas", totalOverEstimateL1CommitGas,
|
||||
"maxL1CommitGasPerChunk", p.maxL1CommitGasPerChunk,
|
||||
"chunkRowConsumptionMax", crcMax,
|
||||
"chunkRowConsumption", crc,
|
||||
"p.maxRowConsumptionPerChunk", p.maxRowConsumptionPerChunk)
|
||||
|
||||
p.chunkTxNum.Set(float64(lastTotalTxNum))
|
||||
p.chunkEstimateL1CommitGas.Set(float64(lastTotalL1CommitGas))
|
||||
p.totalL1CommitCalldataSize.Set(float64(lastTotalL1CommitCalldataSize))
|
||||
p.maxTxConsumption.Set(float64(lastCrcMax))
|
||||
p.totalTxGasUsed.Set(float64(lastTotalTxGasUsed))
|
||||
p.chunkBlocksNum.Set(float64(len(chunk.Blocks)))
|
||||
return &chunk, nil
|
||||
}
|
||||
chunk.Blocks = append(chunk.Blocks, block)
|
||||
}
|
||||
|
||||
var hasBlockTimeout bool
|
||||
currentTimeSec := uint64(time.Now().Unix())
|
||||
if blocks[0].Header.Time+p.chunkTimeoutSec < currentTimeSec {
|
||||
log.Warn("first block timeout",
|
||||
@@ -208,15 +299,17 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
|
||||
"block timestamp", blocks[0].Header.Time,
|
||||
"block outdated time threshold", currentTimeSec,
|
||||
)
|
||||
hasBlockTimeout = true
|
||||
p.chunkFirstBlockTimeoutReached.Inc()
|
||||
p.chunkTxNum.Set(float64(totalTxNum))
|
||||
p.chunkEstimateL1CommitGas.Set(float64(totalL1CommitGas))
|
||||
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
|
||||
p.maxTxConsumption.Set(float64(crc.max()))
|
||||
p.totalTxGasUsed.Set(float64(totalTxGasUsed))
|
||||
p.chunkBlocksNum.Set(float64(len(chunk.Blocks)))
|
||||
return &chunk, nil
|
||||
}
|
||||
|
||||
if !hasBlockTimeout && totalL1CommitCalldataSize < p.minL1CommitCalldataSizePerChunk {
|
||||
log.Warn("The calldata size of the chunk is less than the minimum limit",
|
||||
"totalL1CommitCalldataSize", totalL1CommitCalldataSize,
|
||||
"minL1CommitCalldataSizePerChunk", p.minL1CommitCalldataSizePerChunk,
|
||||
)
|
||||
return nil, nil
|
||||
}
|
||||
return chunk, nil
|
||||
log.Debug("pending blocks do not reach one of the constraints or contain a timeout block")
|
||||
p.chunkBlocksProposeNotEnoughTotal.Inc()
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
@@ -23,14 +23,12 @@ func testChunkProposer(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
|
||||
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
|
||||
MaxTxGasPerChunk: 1000000000,
|
||||
MaxL2TxNumPerChunk: 10000,
|
||||
MaxTxNumPerChunk: 10000,
|
||||
MaxL1CommitGasPerChunk: 50000000000,
|
||||
MaxL1CommitCalldataSizePerChunk: 1000000,
|
||||
MinL1CommitCalldataSizePerChunk: 0,
|
||||
MaxRowConsumptionPerChunk: 1048319,
|
||||
ChunkTimeoutSec: 300,
|
||||
}, db)
|
||||
}, db, nil)
|
||||
cp.TryProposeChunk()
|
||||
|
||||
expectedChunk := &types.Chunk{
|
||||
@@ -40,7 +38,7 @@ func testChunkProposer(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
chunks, err := chunkOrm.GetUnbatchedChunks(context.Background())
|
||||
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, chunks, 1)
|
||||
assert.Equal(t, expectedHash.Hex(), chunks[0].Hash)
|
||||
@@ -55,18 +53,16 @@ func testChunkProposerRowConsumption(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
|
||||
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
|
||||
MaxTxGasPerChunk: 1000000000,
|
||||
MaxL2TxNumPerChunk: 10000,
|
||||
MaxTxNumPerChunk: 10000,
|
||||
MaxL1CommitGasPerChunk: 50000000000,
|
||||
MaxL1CommitCalldataSizePerChunk: 1000000,
|
||||
MinL1CommitCalldataSizePerChunk: 0,
|
||||
MaxRowConsumptionPerChunk: 0, // !
|
||||
ChunkTimeoutSec: 300,
|
||||
}, db)
|
||||
}, db, nil)
|
||||
cp.TryProposeChunk()
|
||||
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
chunks, err := chunkOrm.GetUnbatchedChunks(context.Background())
|
||||
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, chunks, 0)
|
||||
}
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"context"
|
||||
"math/big"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
geth "github.com/scroll-tech/go-ethereum"
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
@@ -11,11 +12,9 @@ import (
|
||||
"github.com/scroll-tech/go-ethereum/crypto"
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/metrics"
|
||||
"scroll-tech/common/types"
|
||||
|
||||
bridgeAbi "scroll-tech/bridge/abi"
|
||||
@@ -23,12 +22,6 @@ import (
|
||||
"scroll-tech/bridge/internal/utils"
|
||||
)
|
||||
|
||||
var (
|
||||
bridgeL1MsgsSyncHeightGauge = gethMetrics.NewRegisteredGauge("bridge/l1/msgs/sync/height", metrics.ScrollRegistry)
|
||||
bridgeL1MsgsSentEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/sent/events/total", metrics.ScrollRegistry)
|
||||
bridgeL1MsgsRollupEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/rollup/events/total", metrics.ScrollRegistry)
|
||||
)
|
||||
|
||||
type rollupEvent struct {
|
||||
batchHash common.Hash
|
||||
txHash common.Hash
|
||||
@@ -46,9 +39,6 @@ type L1WatcherClient struct {
|
||||
// The number of new blocks to wait for before a block is considered confirmed
|
||||
confirmations rpc.BlockNumber
|
||||
|
||||
messengerAddress common.Address
|
||||
messengerABI *abi.ABI
|
||||
|
||||
messageQueueAddress common.Address
|
||||
messageQueueABI *abi.ABI
|
||||
|
||||
@@ -59,10 +49,12 @@ type L1WatcherClient struct {
|
||||
processedMsgHeight uint64
|
||||
// The height of the latest block whose header RLP the watcher has retrieved
|
||||
processedBlockHeight uint64
|
||||
|
||||
metrics *l1WatcherMetrics
|
||||
}
|
||||
|
||||
// NewL1WatcherClient returns a new instance of L1WatcherClient.
|
||||
func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeight uint64, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress, scrollChainAddress common.Address, db *gorm.DB) *L1WatcherClient {
|
||||
func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeight uint64, confirmations rpc.BlockNumber, messageQueueAddress, scrollChainAddress common.Address, db *gorm.DB, reg prometheus.Registerer) *L1WatcherClient {
|
||||
l1MessageOrm := orm.NewL1Message(db)
|
||||
savedHeight, err := l1MessageOrm.GetLayer1LatestWatchedHeight()
|
||||
if err != nil {
|
||||
@@ -91,9 +83,6 @@ func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeig
|
||||
batchOrm: orm.NewBatch(db),
|
||||
confirmations: confirmations,
|
||||
|
||||
messengerAddress: messengerAddress,
|
||||
messengerABI: bridgeAbi.L1ScrollMessengerABI,
|
||||
|
||||
messageQueueAddress: messageQueueAddress,
|
||||
messageQueueABI: bridgeAbi.L1MessageQueueABI,
|
||||
|
||||
@@ -102,6 +91,7 @@ func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeig
|
||||
|
||||
processedMsgHeight: uint64(savedHeight),
|
||||
processedBlockHeight: savedL1BlockHeight,
|
||||
metrics: initL1WatcherMetrics(reg),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -125,6 +115,7 @@ func (w *L1WatcherClient) SetConfirmations(confirmations rpc.BlockNumber) {
|
||||
|
||||
// FetchBlockHeader pulls the latest L1 blocks and saves them in the DB
|
||||
func (w *L1WatcherClient) FetchBlockHeader(blockHeight uint64) error {
|
||||
w.metrics.l1WatcherFetchBlockHeaderTotal.Inc()
|
||||
fromBlock := int64(w.processedBlockHeight) + 1
|
||||
toBlock := int64(blockHeight)
|
||||
if toBlock < fromBlock {
|
||||
@@ -171,6 +162,7 @@ func (w *L1WatcherClient) FetchBlockHeader(blockHeight uint64) error {
|
||||
|
||||
// update processed height
|
||||
w.processedBlockHeight = uint64(toBlock)
|
||||
w.metrics.l1WatcherFetchBlockHeaderProcessedBlockHeight.Set(float64(w.processedBlockHeight))
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -189,6 +181,7 @@ func (w *L1WatcherClient) FetchContractEvent() error {
|
||||
toBlock := int64(blockHeight)
|
||||
|
||||
for from := fromBlock; from <= toBlock; from += contractEventsBlocksFetchLimit {
|
||||
w.metrics.l1WatcherFetchContractEventTotal.Inc()
|
||||
to := from + contractEventsBlocksFetchLimit - 1
|
||||
|
||||
if to > toBlock {
|
||||
@@ -200,18 +193,15 @@ func (w *L1WatcherClient) FetchContractEvent() error {
|
||||
FromBlock: big.NewInt(from), // inclusive
|
||||
ToBlock: big.NewInt(to), // inclusive
|
||||
Addresses: []common.Address{
|
||||
w.messengerAddress,
|
||||
w.scrollChainAddress,
|
||||
w.messageQueueAddress,
|
||||
},
|
||||
Topics: make([][]common.Hash, 1),
|
||||
}
|
||||
query.Topics[0] = make([]common.Hash, 5)
|
||||
query.Topics[0] = make([]common.Hash, 3)
|
||||
query.Topics[0][0] = bridgeAbi.L1QueueTransactionEventSignature
|
||||
query.Topics[0][1] = bridgeAbi.L1RelayedMessageEventSignature
|
||||
query.Topics[0][2] = bridgeAbi.L1FailedRelayedMessageEventSignature
|
||||
query.Topics[0][3] = bridgeAbi.L1CommitBatchEventSignature
|
||||
query.Topics[0][4] = bridgeAbi.L1FinalizeBatchEventSignature
|
||||
query.Topics[0][1] = bridgeAbi.L1CommitBatchEventSignature
|
||||
query.Topics[0][2] = bridgeAbi.L1FinalizeBatchEventSignature
|
||||
|
||||
logs, err := w.client.FilterLogs(w.ctx, query)
|
||||
if err != nil {
|
||||
@@ -220,9 +210,10 @@ func (w *L1WatcherClient) FetchContractEvent() error {
|
||||
}
|
||||
if len(logs) == 0 {
|
||||
w.processedMsgHeight = uint64(to)
|
||||
bridgeL1MsgsSyncHeightGauge.Update(to)
|
||||
w.metrics.l1WatcherFetchContractEventProcessedBlockHeight.Set(float64(to))
|
||||
continue
|
||||
}
|
||||
|
||||
log.Info("Received new L1 events", "fromBlock", from, "toBlock", to, "cnt", len(logs))
|
||||
|
||||
sentMessageEvents, rollupEvents, err := w.parseBridgeEventLogs(logs)
|
||||
@@ -232,8 +223,8 @@ func (w *L1WatcherClient) FetchContractEvent() error {
|
||||
}
|
||||
sentMessageCount := int64(len(sentMessageEvents))
|
||||
rollupEventCount := int64(len(rollupEvents))
|
||||
bridgeL1MsgsSentEventsTotalCounter.Inc(sentMessageCount)
|
||||
bridgeL1MsgsRollupEventsTotalCounter.Inc(rollupEventCount)
|
||||
w.metrics.l1WatcherFetchContractEventSentEventsTotal.Add(float64(sentMessageCount))
|
||||
w.metrics.l1WatcherFetchContractEventRollupEventsTotal.Add(float64(rollupEventCount))
|
||||
log.Info("L1 events types", "SentMessageCount", sentMessageCount, "RollupEventCount", rollupEventCount)
|
||||
|
||||
// use rollup event to update rollup results db status
|
||||
@@ -273,7 +264,8 @@ func (w *L1WatcherClient) FetchContractEvent() error {
|
||||
}
|
||||
|
||||
w.processedMsgHeight = uint64(to)
|
||||
bridgeL1MsgsSyncHeightGauge.Update(to)
|
||||
w.metrics.l1WatcherFetchContractEventSuccessTotal.Inc()
|
||||
w.metrics.l1WatcherFetchContractEventProcessedBlockHeight.Set(float64(w.processedMsgHeight))
|
||||
}
|
||||
|
||||
return nil
|
||||
|
||||
59
bridge/internal/controller/watcher/l1_watcher_metrics.go
Normal file
@@ -0,0 +1,59 @@
|
||||
package watcher
|
||||
|
||||
import (
|
||||
"sync"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
"github.com/prometheus/client_golang/prometheus/promauto"
|
||||
)
|
||||
|
||||
type l1WatcherMetrics struct {
|
||||
l1WatcherFetchBlockHeaderTotal prometheus.Counter
|
||||
l1WatcherFetchBlockHeaderProcessedBlockHeight prometheus.Gauge
|
||||
l1WatcherFetchContractEventTotal prometheus.Counter
|
||||
l1WatcherFetchContractEventSuccessTotal prometheus.Counter
|
||||
l1WatcherFetchContractEventProcessedBlockHeight prometheus.Gauge
|
||||
l1WatcherFetchContractEventSentEventsTotal prometheus.Counter
|
||||
l1WatcherFetchContractEventRollupEventsTotal prometheus.Counter
|
||||
}
|
||||
|
||||
var (
|
||||
initL1WatcherMetricOnce sync.Once
|
||||
l1WatcherMetric *l1WatcherMetrics
|
||||
)
|
||||
|
||||
func initL1WatcherMetrics(reg prometheus.Registerer) *l1WatcherMetrics {
|
||||
initL1WatcherMetricOnce.Do(func() {
|
||||
l1WatcherMetric = &l1WatcherMetrics{
|
||||
l1WatcherFetchBlockHeaderTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_l1_watcher_fetch_block_header_total",
|
||||
Help: "The total number of l1 watcher fetch block header total",
|
||||
}),
|
||||
l1WatcherFetchBlockHeaderProcessedBlockHeight: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "bridge_l1_watcher_fetch_block_header_processed_block_height",
|
||||
Help: "The current processed block height of l1 watcher fetch block header",
|
||||
}),
|
||||
l1WatcherFetchContractEventTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_l1_watcher_fetch_block_contract_event_total",
|
||||
Help: "The total number of l1 watcher fetch contract event total",
|
||||
}),
|
||||
l1WatcherFetchContractEventSuccessTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_l1_watcher_fetch_block_contract_event_success_total",
|
||||
Help: "The total number of l1 watcher fetch contract event success total",
|
||||
}),
|
||||
l1WatcherFetchContractEventProcessedBlockHeight: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
|
||||
Name: "bridge_l1_watcher_fetch_block_contract_event_processed_block_height",
|
||||
Help: "The current processed block height of l1 watcher fetch contract event",
|
||||
}),
|
||||
l1WatcherFetchContractEventSentEventsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_l1_watcher_fetch_block_contract_event_sent_event_total",
|
||||
Help: "The current processed block height of l1 watcher fetch contract sent event",
|
||||
}),
|
||||
l1WatcherFetchContractEventRollupEventsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
|
||||
Name: "bridge_l1_watcher_fetch_block_contract_event_rollup_event_total",
|
||||
Help: "The current processed block height of l1 watcher fetch contract rollup event",
|
||||
}),
|
||||
}
|
||||
})
|
||||
return l1WatcherMetric
|
||||
}
|
||||
@@ -30,7 +30,7 @@ func setupL1Watcher(t *testing.T) (*L1WatcherClient, *gorm.DB) {
client, err := ethclient.Dial(base.L1gethImg.Endpoint())
assert.NoError(t, err)
l1Cfg := cfg.L1Config
watcher := NewL1WatcherClient(context.Background(), client, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.RelayerConfig.RollupContractAddress, db)
watcher := NewL1WatcherClient(context.Background(), client, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessageQueueAddress, l1Cfg.RelayerConfig.RollupContractAddress, db, nil)
assert.NoError(t, watcher.FetchContractEvent())
return watcher, db
}
@@ -5,6 +5,7 @@ import (
|
||||
"fmt"
|
||||
"math/big"
|
||||
|
||||
"github.com/prometheus/client_golang/prometheus"
|
||||
geth "github.com/scroll-tech/go-ethereum"
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
@@ -13,11 +14,9 @@ import (
|
||||
"github.com/scroll-tech/go-ethereum/ethclient"
|
||||
"github.com/scroll-tech/go-ethereum/event"
|
||||
"github.com/scroll-tech/go-ethereum/log"
|
||||
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
"gorm.io/gorm"
|
||||
|
||||
"scroll-tech/common/metrics"
|
||||
"scroll-tech/common/types"
|
||||
|
||||
bridgeAbi "scroll-tech/bridge/abi"
|
||||
@@ -25,14 +24,6 @@ import (
|
||||
"scroll-tech/bridge/internal/utils"
|
||||
)
|
||||
|
||||
// Metrics
|
||||
var (
|
||||
bridgeL2MsgsSyncHeightGauge = gethMetrics.NewRegisteredGauge("bridge/l2/msgs/sync/height", metrics.ScrollRegistry)
|
||||
bridgeL2BlocksFetchedHeightGauge = gethMetrics.NewRegisteredGauge("bridge/l2/blocks/fetched/height", metrics.ScrollRegistry)
|
||||
bridgeL2BlocksFetchedGapGauge = gethMetrics.NewRegisteredGauge("bridge/l2/blocks/fetched/gap", metrics.ScrollRegistry)
|
||||
bridgeL2MsgsRelayedEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/msgs/relayed/events/total", metrics.ScrollRegistry)
|
||||
)
|
||||
|
||||
// L2WatcherClient provides APIs for subscribing to various events from l2geth
|
||||
type L2WatcherClient struct {
|
||||
ctx context.Context
|
||||
@@ -56,10 +47,12 @@ type L2WatcherClient struct {
|
||||
processedMsgHeight uint64
|
||||
|
||||
stopped uint64
|
||||
|
||||
metrics *l2WatcherMetrics
|
||||
}
|
||||
|
||||
// NewL2WatcherClient takes an l2geth client instance and generates an L2WatcherClient instance
|
||||
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress common.Address, withdrawTrieRootSlot common.Hash, db *gorm.DB) *L2WatcherClient {
|
||||
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress common.Address, withdrawTrieRootSlot common.Hash, db *gorm.DB, reg prometheus.Registerer) *L2WatcherClient {
|
||||
l1MessageOrm := orm.NewL1Message(db)
|
||||
var savedHeight uint64
|
||||
l1msg, err := l1MessageOrm.GetLayer1LatestMessageWithLayer2Hash()
|
||||
@@ -93,6 +86,7 @@ func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmat
|
||||
withdrawTrieRootSlot: withdrawTrieRootSlot,
|
||||
|
||||
stopped: 0,
|
||||
metrics: initL2WatcherMetrics(reg),
|
||||
}
|
||||
|
||||
return &w
|
||||
@@ -102,6 +96,7 @@ const blockTracesFetchLimit = uint64(10)
|
||||
|
||||
// TryFetchRunningMissingBlocks attempts to fetch and store block traces for any missing blocks.
|
||||
func (w *L2WatcherClient) TryFetchRunningMissingBlocks(blockHeight uint64) {
|
||||
w.metrics.fetchRunningMissingBlocksTotal.Inc()
|
||||
heightInDB, err := w.l2BlockOrm.GetL2BlocksLatestHeight(w.ctx)
|
||||
if err != nil {
|
||||
log.Error("failed to GetL2BlocksLatestHeight", "err", err)
|
||||
@@ -120,8 +115,8 @@ func (w *L2WatcherClient) TryFetchRunningMissingBlocks(blockHeight uint64) {
|
||||
log.Error("fail to getAndStoreBlockTraces", "from", from, "to", to, "err", err)
|
||||
return
|
||||
}
|
||||
bridgeL2BlocksFetchedHeightGauge.Update(int64(to))
|
||||
bridgeL2BlocksFetchedGapGauge.Update(int64(blockHeight - to))
|
||||
w.metrics.fetchRunningMissingBlocksHeight.Set(float64(to))
|
||||
w.metrics.bridgeL2BlocksFetchedGap.Set(float64(blockHeight - to))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -199,6 +194,7 @@ func (w *L2WatcherClient) FetchContractEvent() {
|
||||
log.Info("l2 watcher fetchContractEvent", "w.processedMsgHeight", w.processedMsgHeight)
|
||||
}()
|
||||
|
||||
w.metrics.fetchContractEventTotal.Inc()
|
||||
blockHeight, err := utils.GetLatestConfirmedBlockNumber(w.ctx, w.Client, w.confirmations)
|
||||
if err != nil {
|
||||
log.Error("failed to get block number", "err", err)
|
||||
@@ -238,7 +234,7 @@ func (w *L2WatcherClient) FetchContractEvent() {
|
||||
}
|
||||
if len(logs) == 0 {
|
||||
w.processedMsgHeight = uint64(to)
|
||||
bridgeL2MsgsSyncHeightGauge.Update(to)
|
||||
w.metrics.fetchContractEventHeight.Set(float64(to))
|
||||
continue
|
||||
}
|
||||
log.Info("received new L2 messages", "fromBlock", from, "toBlock", to, "cnt", len(logs))
|
||||
@@ -250,7 +246,7 @@ func (w *L2WatcherClient) FetchContractEvent() {
|
||||
}
|
||||
|
||||
relayedMessageCount := int64(len(relayedMessageEvents))
|
||||
bridgeL2MsgsRelayedEventsTotalCounter.Inc(relayedMessageCount)
|
||||
w.metrics.bridgeL2MsgsRelayedEventsTotal.Add(float64(relayedMessageCount))
|
||||
log.Info("L2 events types", "RelayedMessageCount", relayedMessageCount)
|
||||
|
||||
// Update relayed message first to make sure we don't forget to update submitted message.
|
||||
@@ -269,7 +265,7 @@ func (w *L2WatcherClient) FetchContractEvent() {
|
||||
}
|
||||
|
||||
w.processedMsgHeight = uint64(to)
|
||||
bridgeL2MsgsSyncHeightGauge.Update(to)
|
||||
w.metrics.fetchContractEventHeight.Set(float64(to))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
54
bridge/internal/controller/watcher/l2_watcher_metrics.go
Normal file
@@ -0,0 +1,54 @@
package watcher

import (
    "sync"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
)

type l2WatcherMetrics struct {
    fetchRunningMissingBlocksTotal  prometheus.Counter
    fetchRunningMissingBlocksHeight prometheus.Gauge
    fetchContractEventTotal         prometheus.Counter
    fetchContractEventHeight        prometheus.Gauge
    bridgeL2MsgsRelayedEventsTotal  prometheus.Counter
    bridgeL2BlocksFetchedGap        prometheus.Gauge
}

var (
    initL2WatcherMetricOnce sync.Once
    l2WatcherMetric         *l2WatcherMetrics
)

func initL2WatcherMetrics(reg prometheus.Registerer) *l2WatcherMetrics {
    initL2WatcherMetricOnce.Do(func() {
        l2WatcherMetric = &l2WatcherMetrics{
            fetchRunningMissingBlocksTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
                Name: "bridge_l2_watcher_fetch_running_missing_blocks_total",
                Help: "The total number of L2 watcher fetch running missing blocks calls",
            }),
            fetchRunningMissingBlocksHeight: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
                Name: "bridge_l2_watcher_fetch_running_missing_blocks_height",
                Help: "The latest L2 block height fetched when filling missing blocks",
            }),
            fetchContractEventTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
                Name: "bridge_l2_watcher_fetch_contract_events_total",
                Help: "The total number of L2 watcher fetch contract event calls",
            }),
            fetchContractEventHeight: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
                Name: "bridge_l2_watcher_fetch_contract_height",
                Help: "The latest L2 block height processed by the contract event fetcher",
            }),
            bridgeL2MsgsRelayedEventsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
                Name: "bridge_l2_watcher_msg_relayed_events_total",
                Help: "The total number of relayed message events fetched by the L2 watcher",
            }),
            bridgeL2BlocksFetchedGap: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
                Name: "bridge_l2_watcher_blocks_fetched_gap",
                Help: "The gap between the latest L2 block height and the latest fetched block height",
            }),
        }
    })
    return l2WatcherMetric
}
@@ -34,7 +34,8 @@ import (
|
||||
func setupL2Watcher(t *testing.T) (*L2WatcherClient, *gorm.DB) {
|
||||
db := setupDB(t)
|
||||
l2cfg := cfg.L2Config
|
||||
watcher := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, db)
|
||||
watcher := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress,
|
||||
l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, db, nil)
|
||||
return watcher, db
|
||||
}
|
||||
|
||||
@@ -50,7 +51,7 @@ func testCreateNewWatcherAndStop(t *testing.T) {
|
||||
|
||||
l1cfg := cfg.L1Config
|
||||
l1cfg.RelayerConfig.SenderConfig.Confirmations = rpc.LatestBlockNumber
|
||||
newSender, err := sender.NewSender(context.Background(), l1cfg.RelayerConfig.SenderConfig, l1cfg.RelayerConfig.MessageSenderPrivateKeys)
|
||||
newSender, err := sender.NewSender(context.Background(), l1cfg.RelayerConfig.SenderConfig, l1cfg.RelayerConfig.GasOracleSenderPrivateKey, "test", "test", nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Create several transactions and commit to block
|
||||
@@ -71,7 +72,7 @@ func testFetchRunningMissingBlocks(t *testing.T) {
|
||||
_, db := setupL2Watcher(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
auth := prepareAuth(t, l2Cli, cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys[0])
|
||||
auth := prepareAuth(t, l2Cli, cfg.L2Config.RelayerConfig.GasOracleSenderPrivateKey)
|
||||
|
||||
// deploy mock bridge
|
||||
_, tx, _, err := mock_bridge.DeployMockBridgeL2(auth, l2Cli)
|
||||
@@ -95,7 +96,7 @@ func testFetchRunningMissingBlocks(t *testing.T) {
|
||||
|
||||
func prepareWatcherClient(l2Cli *ethclient.Client, db *gorm.DB, contractAddr common.Address) *L2WatcherClient {
|
||||
confirmations := rpc.LatestBlockNumber
|
||||
return NewL2WatcherClient(context.Background(), l2Cli, confirmations, contractAddr, contractAddr, common.Hash{}, db)
|
||||
return NewL2WatcherClient(context.Background(), l2Cli, confirmations, contractAddr, contractAddr, common.Hash{}, db, nil)
|
||||
}
|
||||
|
||||
func prepareAuth(t *testing.T, l2Cli *ethclient.Client, privateKey *ecdsa.PrivateKey) *bind.TransactOpts {
|
||||
|
||||
@@ -53,9 +53,11 @@ type Batch struct {
|
||||
OracleTxHash string `json:"oracle_tx_hash" gorm:"column:oracle_tx_hash;default:NULL"`
|
||||
|
||||
// metadata
|
||||
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
|
||||
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
|
||||
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
|
||||
TotalL1CommitGas uint64 `json:"total_l1_commit_gas" gorm:"column:total_l1_commit_gas;default:0"`
|
||||
TotalL1CommitCalldataSize uint32 `json:"total_l1_commit_calldata_size" gorm:"column:total_l1_commit_calldata_size;default:0"`
|
||||
CreatedAt time.Time `json:"created_at" gorm:"column:created_at"`
|
||||
UpdatedAt time.Time `json:"updated_at" gorm:"column:updated_at"`
|
||||
DeletedAt gorm.DeletedAt `json:"deleted_at" gorm:"column:deleted_at;default:NULL"`
|
||||
}
|
||||
|
||||
// NewBatch creates a new Batch database instance.
|
||||
@@ -142,6 +144,21 @@ func (o *Batch) GetLatestBatch(ctx context.Context) (*Batch, error) {
|
||||
return &latestBatch, nil
|
||||
}
|
||||
|
||||
// GetFirstUnbatchedChunkIndex retrieves the first unbatched chunk index.
|
||||
func (o *Batch) GetFirstUnbatchedChunkIndex(ctx context.Context) (uint64, error) {
|
||||
// Get the latest batch
|
||||
latestBatch, err := o.GetLatestBatch(ctx)
|
||||
if err != nil {
|
||||
return 0, fmt.Errorf("Batch.GetFirstUnbatchedChunkIndex error: %w", err)
|
||||
}
|
||||
// if latestBatch==nil then err==gorm.ErrRecordNotFound,
// which means there is no batched chunk yet, thus return 0
|
||||
if latestBatch == nil {
|
||||
return 0, nil
|
||||
}
|
||||
return latestBatch.EndChunkIndex + 1, nil
|
||||
}
|
||||
|
||||
// GetRollupStatusByHashList retrieves the rollup statuses for a list of batch hashes.
|
||||
func (o *Batch) GetRollupStatusByHashList(ctx context.Context, hashes []string) ([]types.RollupStatus, error) {
|
||||
if len(hashes) == 0 {
|
||||
@@ -209,7 +226,7 @@ func (o *Batch) GetBatchByIndex(ctx context.Context, index uint64) (*Batch, erro
|
||||
}
|
||||
|
||||
// InsertBatch inserts a new batch into the database.
|
||||
func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex uint64, startChunkHash, endChunkHash string, chunks []*types.Chunk, dbTX ...*gorm.DB) (*Batch, error) {
|
||||
func (o *Batch) InsertBatch(ctx context.Context, chunks []*types.Chunk, batchMeta *types.BatchMeta, dbTX ...*gorm.DB) (*Batch, error) {
|
||||
if len(chunks) == 0 {
|
||||
return nil, errors.New("invalid args")
|
||||
}
|
||||
@@ -255,20 +272,22 @@ func (o *Batch) InsertBatch(ctx context.Context, startChunkIndex, endChunkIndex
|
||||
lastChunkBlockNum := len(chunks[numChunks-1].Blocks)
|
||||
|
||||
newBatch := Batch{
|
||||
Index: batchIndex,
|
||||
Hash: batchHeader.Hash().Hex(),
|
||||
StartChunkHash: startChunkHash,
|
||||
StartChunkIndex: startChunkIndex,
|
||||
EndChunkHash: endChunkHash,
|
||||
EndChunkIndex: endChunkIndex,
|
||||
StateRoot: chunks[numChunks-1].Blocks[lastChunkBlockNum-1].Header.Root.Hex(),
|
||||
WithdrawRoot: chunks[numChunks-1].Blocks[lastChunkBlockNum-1].WithdrawRoot.Hex(),
|
||||
ParentBatchHash: parentBatchHash.Hex(),
|
||||
BatchHeader: batchHeader.Encode(),
|
||||
ChunkProofsStatus: int16(types.ChunkProofsStatusPending),
|
||||
ProvingStatus: int16(types.ProvingTaskUnassigned),
|
||||
RollupStatus: int16(types.RollupPending),
|
||||
OracleStatus: int16(types.GasOraclePending),
|
||||
Index: batchIndex,
|
||||
Hash: batchHeader.Hash().Hex(),
|
||||
StartChunkHash: batchMeta.StartChunkHash,
|
||||
StartChunkIndex: batchMeta.StartChunkIndex,
|
||||
EndChunkHash: batchMeta.EndChunkHash,
|
||||
EndChunkIndex: batchMeta.EndChunkIndex,
|
||||
StateRoot: chunks[numChunks-1].Blocks[lastChunkBlockNum-1].Header.Root.Hex(),
|
||||
WithdrawRoot: chunks[numChunks-1].Blocks[lastChunkBlockNum-1].WithdrawRoot.Hex(),
|
||||
ParentBatchHash: parentBatchHash.Hex(),
|
||||
BatchHeader: batchHeader.Encode(),
|
||||
ChunkProofsStatus: int16(types.ChunkProofsStatusPending),
|
||||
ProvingStatus: int16(types.ProvingTaskUnassigned),
|
||||
RollupStatus: int16(types.RollupPending),
|
||||
OracleStatus: int16(types.GasOraclePending),
|
||||
TotalL1CommitGas: batchMeta.TotalL1CommitGas,
|
||||
TotalL1CommitCalldataSize: batchMeta.TotalL1CommitCalldataSize,
|
||||
}
|
||||
|
||||
db := o.db
|
||||
@@ -311,7 +330,7 @@ func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status typ
|
||||
updateFields["prover_assigned_at"] = time.Now()
|
||||
case types.ProvingTaskUnassigned:
|
||||
updateFields["prover_assigned_at"] = nil
|
||||
case types.ProvingTaskProved, types.ProvingTaskVerified:
|
||||
case types.ProvingTaskVerified:
|
||||
updateFields["proved_at"] = time.Now()
|
||||
}
|
||||
|
||||
|
||||
@@ -87,20 +87,6 @@ func (o *Chunk) GetChunksInRange(ctx context.Context, startIndex uint64, endInde
|
||||
return chunks, nil
|
||||
}
|
||||
|
||||
// GetUnbatchedChunks retrieves unbatched chunks from the database.
|
||||
func (o *Chunk) GetUnbatchedChunks(ctx context.Context) ([]*Chunk, error) {
|
||||
db := o.db.WithContext(ctx)
|
||||
db = db.Model(&Chunk{})
|
||||
db = db.Where("batch_hash IS NULL")
|
||||
db = db.Order("index asc")
|
||||
|
||||
var chunks []*Chunk
|
||||
if err := db.Find(&chunks).Error; err != nil {
|
||||
return nil, fmt.Errorf("Chunk.GetUnbatchedChunks error: %w", err)
|
||||
}
|
||||
return chunks, nil
|
||||
}
|
||||
|
||||
// GetLatestChunk retrieves the latest chunk from the database.
|
||||
func (o *Chunk) GetLatestChunk(ctx context.Context) (*Chunk, error) {
|
||||
db := o.db.WithContext(ctx)
|
||||
@@ -114,6 +100,40 @@ func (o *Chunk) GetLatestChunk(ctx context.Context) (*Chunk, error) {
|
||||
return &latestChunk, nil
|
||||
}
|
||||
|
||||
// GetUnchunkedBlockHeight retrieves the first unchunked block number.
|
||||
func (o *Chunk) GetUnchunkedBlockHeight(ctx context.Context) (uint64, error) {
|
||||
// Get the latest chunk
|
||||
latestChunk, err := o.GetLatestChunk(ctx)
|
||||
if err != nil {
|
||||
if errors.Is(err, gorm.ErrRecordNotFound) {
|
||||
// if there is no chunk, return block number 1,
// because the genesis block does not need to be chunked
|
||||
return 1, nil
|
||||
}
|
||||
return 0, fmt.Errorf("Chunk.GetUnchunkedBlockHeight error: %w", err)
|
||||
}
|
||||
return latestChunk.EndBlockNumber + 1, nil
|
||||
}
|
||||
|
||||
// GetChunksGEIndex retrieves chunks that have a chunk index greater than or equal to the given index.
|
||||
// The returned chunks are sorted in ascending order by their index.
|
||||
func (o *Chunk) GetChunksGEIndex(ctx context.Context, index uint64, limit int) ([]*Chunk, error) {
|
||||
db := o.db.WithContext(ctx)
|
||||
db = db.Model(&Chunk{})
|
||||
db = db.Where("index >= ?", index)
|
||||
db = db.Order("index ASC")
|
||||
|
||||
if limit > 0 {
|
||||
db = db.Limit(limit)
|
||||
}
|
||||
|
||||
var chunks []*Chunk
|
||||
if err := db.Find(&chunks).Error; err != nil {
|
||||
return nil, fmt.Errorf("Chunk.GetChunksGEIndex error: %w", err)
|
||||
}
|
||||
return chunks, nil
|
||||
}
|
||||
|
||||
// InsertChunk inserts a new chunk into the database.
|
||||
func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk, dbTX ...*gorm.DB) (*Chunk, error) {
|
||||
if chunk == nil || len(chunk.Blocks) == 0 {
|
||||
@@ -151,7 +171,7 @@ func (o *Chunk) InsertChunk(ctx context.Context, chunk *types.Chunk, dbTX ...*go
|
||||
var totalL1CommitCalldataSize uint64
|
||||
for _, block := range chunk.Blocks {
|
||||
totalL2TxGas += block.Header.GasUsed
|
||||
totalL2TxNum += block.L2TxsNum()
|
||||
totalL2TxNum += block.NumL2Transactions()
|
||||
totalL1CommitCalldataSize += block.EstimateL1CommitCalldataSize()
|
||||
}
|
||||
|
||||
@@ -201,7 +221,7 @@ func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status typ
|
||||
updateFields["prover_assigned_at"] = time.Now()
|
||||
case types.ProvingTaskUnassigned:
|
||||
updateFields["prover_assigned_at"] = nil
|
||||
case types.ProvingTaskProved, types.ProvingTaskVerified:
|
||||
case types.ProvingTaskVerified:
|
||||
updateFields["proved_at"] = time.Now()
|
||||
}
|
||||
|
||||
|
||||
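The unbatched-chunk bookkeeping that GetUnbatchedChunks used to do via a batch_hash IS NULL filter is now reconstructed from indices: Batch.GetFirstUnbatchedChunkIndex plus Chunk.GetChunksGEIndex. Below is a minimal sketch of how the two helpers combine; it is not from the PR, the package name, db handle, and the paging limit of 100 are assumptions (it would have to live inside the bridge module to import the internal orm package), while the ORM calls are the ones shown above and in the tests below.

package example

import (
    "context"

    "gorm.io/gorm"

    "scroll-tech/bridge/internal/orm"
)

// proposeFromUnbatchedChunks is an illustrative sketch, not part of the PR.
// It combines Batch.GetFirstUnbatchedChunkIndex with Chunk.GetChunksGEIndex
// the same way the batch-proposer test below does.
func proposeFromUnbatchedChunks(ctx context.Context, db *gorm.DB) error {
    batchOrm := orm.NewBatch(db)
    chunkOrm := orm.NewChunk(db)

    // The latest batch stores its EndChunkIndex, so the first chunk that
    // still needs batching is EndChunkIndex+1 (or 0 when no batch exists).
    firstUnbatched, err := batchOrm.GetFirstUnbatchedChunkIndex(ctx)
    if err != nil {
        return err
    }

    // Fetch candidate chunks in ascending index order, starting there.
    chunks, err := chunkOrm.GetChunksGEIndex(ctx, firstUnbatched, 100)
    if err != nil {
        return err
    }
    for _, c := range chunks {
        _ = c // group chunks into the next batch here
    }
    return nil
}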
@@ -64,18 +64,23 @@ func (o *L2Block) GetL2BlocksLatestHeight(ctx context.Context) (uint64, error) {
|
||||
return maxNumber, nil
|
||||
}
|
||||
|
||||
// GetUnchunkedBlocks get the l2 blocks that have not been put into a chunk.
|
||||
// GetL2WrappedBlocksGEHeight retrieves L2 blocks that have a block number greater than or equal to the given height.
|
||||
// The blocks are converted into WrappedBlock format for output.
|
||||
// The returned blocks are sorted in ascending order by their block number.
|
||||
func (o *L2Block) GetUnchunkedBlocks(ctx context.Context) ([]*types.WrappedBlock, error) {
|
||||
func (o *L2Block) GetL2WrappedBlocksGEHeight(ctx context.Context, height uint64, limit int) ([]*types.WrappedBlock, error) {
|
||||
db := o.db.WithContext(ctx)
|
||||
db = db.Model(&L2Block{})
|
||||
db = db.Select("header, transactions, withdraw_root, row_consumption")
|
||||
db = db.Where("chunk_hash IS NULL")
|
||||
db = db.Where("number >= ?", height)
|
||||
db = db.Order("number ASC")
|
||||
|
||||
if limit > 0 {
|
||||
db = db.Limit(limit)
|
||||
}
|
||||
|
||||
var l2Blocks []L2Block
|
||||
if err := db.Find(&l2Blocks).Error; err != nil {
|
||||
return nil, fmt.Errorf("L2Block.GetUnchunkedBlocks error: %w", err)
|
||||
return nil, fmt.Errorf("L2Block.GetL2WrappedBlocksGEHeight error: %w", err)
|
||||
}
|
||||
|
||||
var wrappedBlocks []*types.WrappedBlock
|
||||
@@ -83,18 +88,18 @@ func (o *L2Block) GetUnchunkedBlocks(ctx context.Context) ([]*types.WrappedBlock
|
||||
var wrappedBlock types.WrappedBlock
|
||||
|
||||
if err := json.Unmarshal([]byte(v.Transactions), &wrappedBlock.Transactions); err != nil {
|
||||
return nil, fmt.Errorf("L2Block.GetUnchunkedBlocks error: %w", err)
|
||||
return nil, fmt.Errorf("L2Block.GetL2WrappedBlocksGEHeight error: %w", err)
|
||||
}
|
||||
|
||||
wrappedBlock.Header = &gethTypes.Header{}
|
||||
if err := json.Unmarshal([]byte(v.Header), wrappedBlock.Header); err != nil {
|
||||
return nil, fmt.Errorf("L2Block.GetUnchunkedBlocks error: %w", err)
|
||||
return nil, fmt.Errorf("L2Block.GetL2WrappedBlocksGEHeight error: %w", err)
|
||||
}
|
||||
|
||||
wrappedBlock.WithdrawRoot = common.HexToHash(v.WithdrawRoot)
|
||||
|
||||
if err := json.Unmarshal([]byte(v.RowConsumption), &wrappedBlock.RowConsumption); err != nil {
|
||||
return nil, fmt.Errorf("L2Block.GetUnchunkedBlocks error: %w", err)
|
||||
return nil, fmt.Errorf("L2Block.GetL2WrappedBlocksGEHeight error: %w", err)
|
||||
}
|
||||
|
||||
wrappedBlocks = append(wrappedBlocks, &wrappedBlock)
|
||||
|
||||
@@ -101,25 +101,26 @@ func TestL2BlockOrm(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, uint64(3), height)
|
||||
|
||||
blocks, err := l2BlockOrm.GetUnchunkedBlocks(context.Background())
|
||||
blocks, err := l2BlockOrm.GetL2Blocks(context.Background(), map[string]interface{}{}, []string{}, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, blocks, 2)
|
||||
assert.Equal(t, wrappedBlock1, blocks[0])
|
||||
assert.Equal(t, wrappedBlock2, blocks[1])
|
||||
assert.Equal(t, "", blocks[0].ChunkHash)
|
||||
assert.Equal(t, "", blocks[1].ChunkHash)
|
||||
|
||||
blocks, err = l2BlockOrm.GetL2BlocksInRange(context.Background(), 2, 3)
|
||||
wrappedBlocks, err := l2BlockOrm.GetL2BlocksInRange(context.Background(), 2, 3)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, blocks, 2)
|
||||
assert.Equal(t, wrappedBlock1, blocks[0])
|
||||
assert.Equal(t, wrappedBlock2, blocks[1])
|
||||
assert.Equal(t, wrappedBlock1, wrappedBlocks[0])
|
||||
assert.Equal(t, wrappedBlock2, wrappedBlocks[1])
|
||||
|
||||
err = l2BlockOrm.UpdateChunkHashInRange(context.Background(), 2, 2, "test hash")
|
||||
assert.NoError(t, err)
|
||||
|
||||
blocks, err = l2BlockOrm.GetUnchunkedBlocks(context.Background())
|
||||
blocks, err = l2BlockOrm.GetL2Blocks(context.Background(), map[string]interface{}{}, []string{}, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, blocks, 1)
|
||||
assert.Equal(t, wrappedBlock2, blocks[0])
|
||||
assert.Len(t, blocks, 2)
|
||||
assert.Equal(t, "test hash", blocks[0].ChunkHash)
|
||||
assert.Equal(t, "", blocks[1].ChunkHash)
|
||||
}
|
||||
|
||||
func TestChunkOrm(t *testing.T) {
|
||||
@@ -135,11 +136,13 @@ func TestChunkOrm(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, dbChunk2.Hash, chunkHash2.Hex())
|
||||
|
||||
chunks, err := chunkOrm.GetUnbatchedChunks(context.Background())
|
||||
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, chunks, 2)
|
||||
assert.Equal(t, chunkHash1.Hex(), chunks[0].Hash)
|
||||
assert.Equal(t, chunkHash2.Hex(), chunks[1].Hash)
|
||||
assert.Equal(t, "", chunks[0].BatchHash)
|
||||
assert.Equal(t, "", chunks[1].BatchHash)
|
||||
|
||||
err = chunkOrm.UpdateProvingStatus(context.Background(), chunkHash1.Hex(), types.ProvingTaskVerified)
|
||||
assert.NoError(t, err)
|
||||
@@ -156,9 +159,13 @@ func TestChunkOrm(t *testing.T) {
|
||||
|
||||
err = chunkOrm.UpdateBatchHashInRange(context.Background(), 0, 0, "test hash")
|
||||
assert.NoError(t, err)
|
||||
chunks, err = chunkOrm.GetUnbatchedChunks(context.Background())
|
||||
chunks, err = chunkOrm.GetChunksGEIndex(context.Background(), 0, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, chunks, 1)
|
||||
assert.Len(t, chunks, 2)
|
||||
assert.Equal(t, chunkHash1.Hex(), chunks[0].Hash)
|
||||
assert.Equal(t, chunkHash2.Hex(), chunks[1].Hash)
|
||||
assert.Equal(t, "test hash", chunks[0].BatchHash)
|
||||
assert.Equal(t, "", chunks[1].BatchHash)
|
||||
}
|
||||
|
||||
func TestBatchOrm(t *testing.T) {
|
||||
@@ -166,7 +173,13 @@ func TestBatchOrm(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
assert.NoError(t, migrate.ResetDB(sqlDB))
|
||||
|
||||
batch1, err := batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash1.Hex(), chunkHash1.Hex(), []*types.Chunk{chunk1})
|
||||
batchMeta1 := &types.BatchMeta{
|
||||
StartChunkIndex: 0,
|
||||
StartChunkHash: chunkHash1.Hex(),
|
||||
EndChunkIndex: 0,
|
||||
EndChunkHash: chunkHash1.Hex(),
|
||||
}
|
||||
batch1, err := batchOrm.InsertBatch(context.Background(), []*types.Chunk{chunk1}, batchMeta1)
|
||||
assert.NoError(t, err)
|
||||
hash1 := batch1.Hash
|
||||
|
||||
@@ -177,7 +190,13 @@ func TestBatchOrm(t *testing.T) {
|
||||
batchHash1 := batchHeader1.Hash().Hex()
|
||||
assert.Equal(t, hash1, batchHash1)
|
||||
|
||||
batch2, err := batchOrm.InsertBatch(context.Background(), 1, 1, chunkHash2.Hex(), chunkHash2.Hex(), []*types.Chunk{chunk2})
|
||||
batchMeta2 := &types.BatchMeta{
|
||||
StartChunkIndex: 1,
|
||||
StartChunkHash: chunkHash2.Hex(),
|
||||
EndChunkIndex: 1,
|
||||
EndChunkHash: chunkHash2.Hex(),
|
||||
}
|
||||
batch2, err := batchOrm.InsertBatch(context.Background(), []*types.Chunk{chunk2}, batchMeta2)
|
||||
assert.NoError(t, err)
|
||||
hash2 := batch2.Hash
|
||||
|
||||
|
||||
@@ -2,8 +2,11 @@ package tests
|
||||
|
||||
import (
|
||||
"context"
|
||||
"net/http"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/gin-gonic/gin"
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
"github.com/scroll-tech/go-ethereum/core/types"
|
||||
@@ -13,6 +16,7 @@ import (
|
||||
|
||||
"scroll-tech/common/database"
|
||||
"scroll-tech/common/docker"
|
||||
"scroll-tech/common/utils"
|
||||
|
||||
"scroll-tech/database/migrate"
|
||||
|
||||
@@ -32,17 +36,9 @@ var (
|
||||
l1Auth *bind.TransactOpts
|
||||
l2Auth *bind.TransactOpts
|
||||
|
||||
// l1 messenger contract
|
||||
l1MessengerInstance *mock_bridge.MockBridgeL1
|
||||
l1MessengerAddress common.Address
|
||||
|
||||
// l1 rollup contract
|
||||
scrollChainInstance *mock_bridge.MockBridgeL1
|
||||
scrollChainAddress common.Address
|
||||
|
||||
// l2 messenger contract
|
||||
l2MessengerInstance *mock_bridge.MockBridgeL2
|
||||
l2MessengerAddress common.Address
|
||||
)
|
||||
|
||||
func setupDB(t *testing.T) *gorm.DB {
|
||||
@@ -83,51 +79,51 @@ func setupEnv(t *testing.T) {
|
||||
l2Cfg.Confirmations = 0
|
||||
l2Cfg.RelayerConfig.SenderConfig.Confirmations = 0
|
||||
|
||||
l1Auth, err = bind.NewKeyedTransactorWithChainID(bridgeApp.Config.L2Config.RelayerConfig.MessageSenderPrivateKeys[0], base.L1gethImg.ChainID())
|
||||
l1Auth, err = bind.NewKeyedTransactorWithChainID(bridgeApp.Config.L2Config.RelayerConfig.CommitSenderPrivateKey, base.L1gethImg.ChainID())
|
||||
assert.NoError(t, err)
|
||||
|
||||
l2Auth, err = bind.NewKeyedTransactorWithChainID(bridgeApp.Config.L1Config.RelayerConfig.MessageSenderPrivateKeys[0], base.L2gethImg.ChainID())
|
||||
l2Auth, err = bind.NewKeyedTransactorWithChainID(bridgeApp.Config.L1Config.RelayerConfig.GasOracleSenderPrivateKey, base.L2gethImg.ChainID())
|
||||
assert.NoError(t, err)
|
||||
}
|
||||
|
||||
func mockChainMonitorServer(baseURL string) (*http.Server, error) {
|
||||
router := gin.New()
|
||||
r := router.Group("/v1")
|
||||
r.GET("/batch_status", func(ctx *gin.Context) {
|
||||
ctx.JSON(http.StatusOK, struct {
|
||||
ErrCode int `json:"errcode"`
|
||||
ErrMsg string `json:"errmsg"`
|
||||
Data bool `json:"data"`
|
||||
}{
|
||||
ErrCode: 0,
|
||||
ErrMsg: "",
|
||||
Data: true,
|
||||
})
|
||||
})
|
||||
return utils.StartHTTPServer(strings.Split(baseURL, "//")[1], router)
|
||||
}
|
||||
|
||||
func prepareContracts(t *testing.T) {
|
||||
var err error
|
||||
var tx *types.Transaction
|
||||
|
||||
// L1 messenger contract
|
||||
_, tx, l1MessengerInstance, err = mock_bridge.DeployMockBridgeL1(l1Auth, l1Client)
|
||||
assert.NoError(t, err)
|
||||
l1MessengerAddress, err = bind.WaitDeployed(context.Background(), l1Client, tx)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// L1 ScrollChain contract
|
||||
_, tx, scrollChainInstance, err = mock_bridge.DeployMockBridgeL1(l1Auth, l1Client)
|
||||
assert.NoError(t, err)
|
||||
scrollChainAddress, err = bind.WaitDeployed(context.Background(), l1Client, tx)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// L2 messenger contract
|
||||
_, tx, l2MessengerInstance, err = mock_bridge.DeployMockBridgeL2(l2Auth, l2Client)
|
||||
assert.NoError(t, err)
|
||||
l2MessengerAddress, err = bind.WaitDeployed(context.Background(), l2Client, tx)
|
||||
assert.NoError(t, err)
|
||||
|
||||
l1Config, l2Config := bridgeApp.Config.L1Config, bridgeApp.Config.L2Config
|
||||
l1Config.L1MessengerAddress = l1MessengerAddress
|
||||
l1Config.L1MessageQueueAddress = l1MessengerAddress
|
||||
l1Config.ScrollChainContractAddress = scrollChainAddress
|
||||
l1Config.RelayerConfig.MessengerContractAddress = l2MessengerAddress
|
||||
l1Config.RelayerConfig.GasPriceOracleContractAddress = l1MessengerAddress
|
||||
|
||||
l2Config.L2MessengerAddress = l2MessengerAddress
|
||||
l2Config.L2MessageQueueAddress = l2MessengerAddress
|
||||
l2Config.RelayerConfig.MessengerContractAddress = l1MessengerAddress
|
||||
l2Config.RelayerConfig.RollupContractAddress = scrollChainAddress
|
||||
l2Config.RelayerConfig.GasPriceOracleContractAddress = l2MessengerAddress
|
||||
}
|
||||
|
||||
func TestFunction(t *testing.T) {
|
||||
setupEnv(t)
|
||||
srv, err := mockChainMonitorServer(bridgeApp.Config.L2Config.RelayerConfig.ChainMonitor.BaseURL)
|
||||
assert.NoError(t, err)
|
||||
defer srv.Close()
|
||||
|
||||
// process start test
|
||||
t.Run("TestProcessStart", testProcessStart)
|
||||
@@ -137,7 +133,6 @@ func TestFunction(t *testing.T) {
|
||||
t.Run("TestCommitBatchAndFinalizeBatch", testCommitBatchAndFinalizeBatch)
|
||||
|
||||
// l1 message
|
||||
t.Run("TestRelayL1MessageSucceed", testRelayL1MessageSucceed)
|
||||
|
||||
// l2 message
|
||||
// TODO: add a "user relay l2msg Succeed" test
|
||||
|
||||
@@ -26,13 +26,13 @@ func testImportL1GasPrice(t *testing.T) {
|
||||
l1Cfg := bridgeApp.Config.L1Config
|
||||
|
||||
// Create L1Relayer
|
||||
l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
|
||||
l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Create L1Watcher
|
||||
startHeight, err := l1Client.BlockNumber(context.Background())
|
||||
assert.NoError(t, err)
|
||||
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, startHeight-1, 0, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
|
||||
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, startHeight-1, 0, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db, nil)
|
||||
|
||||
// fetch new blocks
|
||||
number, err := l1Client.BlockNumber(context.Background())
|
||||
@@ -67,7 +67,7 @@ func testImportL2GasPrice(t *testing.T) {
|
||||
prepareContracts(t)
|
||||
|
||||
l2Cfg := bridgeApp.Config.L2Config
|
||||
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, false)
|
||||
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, false, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// add fake chunk
|
||||
@@ -89,8 +89,14 @@ func testImportL2GasPrice(t *testing.T) {
|
||||
chunkHash, err := chunk.Hash(0)
|
||||
assert.NoError(t, err)
|
||||
|
||||
batchMeta := &types.BatchMeta{
|
||||
StartChunkIndex: 0,
|
||||
StartChunkHash: chunkHash.Hex(),
|
||||
EndChunkIndex: 0,
|
||||
EndChunkHash: chunkHash.Hex(),
|
||||
}
|
||||
batchOrm := orm.NewBatch(db)
|
||||
_, err = batchOrm.InsertBatch(context.Background(), 0, 0, chunkHash.Hex(), chunkHash.Hex(), []*types.Chunk{chunk})
|
||||
_, err = batchOrm.InsertBatch(context.Background(), []*types.Chunk{chunk}, batchMeta)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// check db status
|
||||
|
||||
@@ -1,81 +0,0 @@
|
||||
package tests
|
||||
|
||||
import (
|
||||
"context"
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
geth_types "github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/scroll-tech/go-ethereum/rpc"
|
||||
"github.com/stretchr/testify/assert"
|
||||
|
||||
"scroll-tech/common/database"
|
||||
"scroll-tech/common/types"
|
||||
|
||||
"scroll-tech/bridge/internal/controller/relayer"
|
||||
"scroll-tech/bridge/internal/controller/watcher"
|
||||
"scroll-tech/bridge/internal/orm"
|
||||
)
|
||||
|
||||
func testRelayL1MessageSucceed(t *testing.T) {
|
||||
db := setupDB(t)
|
||||
defer database.CloseDB(db)
|
||||
|
||||
prepareContracts(t)
|
||||
|
||||
l1Cfg := bridgeApp.Config.L1Config
|
||||
l2Cfg := bridgeApp.Config.L2Config
|
||||
|
||||
// Create L1Relayer
|
||||
l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
|
||||
assert.NoError(t, err)
|
||||
// Create L1Watcher
|
||||
confirmations := rpc.LatestBlockNumber
|
||||
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
|
||||
|
||||
// Create L2Watcher
|
||||
l2Watcher := watcher.NewL2WatcherClient(context.Background(), l2Client, confirmations, l2Cfg.L2MessengerAddress, l2Cfg.L2MessageQueueAddress, l2Cfg.WithdrawTrieRootSlot, db)
|
||||
|
||||
// send message through l1 messenger contract
|
||||
nonce, err := l1MessengerInstance.MessageNonce(&bind.CallOpts{})
|
||||
assert.NoError(t, err)
|
||||
sendTx, err := l1MessengerInstance.SendMessage(l1Auth, l2Auth.From, big.NewInt(0), common.Hex2Bytes("00112233"), big.NewInt(0))
|
||||
assert.NoError(t, err)
|
||||
sendReceipt, err := bind.WaitMined(context.Background(), l1Client, sendTx)
|
||||
assert.NoError(t, err)
|
||||
if sendReceipt.Status != geth_types.ReceiptStatusSuccessful || err != nil {
|
||||
t.Fatalf("Call failed")
|
||||
}
|
||||
|
||||
// l1 watch process events
|
||||
l1Watcher.FetchContractEvent()
|
||||
|
||||
l1MessageOrm := orm.NewL1Message(db)
|
||||
// check db status
|
||||
msg, err := l1MessageOrm.GetL1MessageByQueueIndex(nonce.Uint64())
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, types.MsgStatus(msg.Status), types.MsgPending)
|
||||
assert.Equal(t, msg.Target, l2Auth.From.String())
|
||||
|
||||
// process l1 messages
|
||||
l1Relayer.ProcessSavedEvents()
|
||||
|
||||
l1Message, err := l1MessageOrm.GetL1MessageByQueueIndex(nonce.Uint64())
|
||||
assert.NoError(t, err)
|
||||
assert.NotEmpty(t, l1Message.Layer2Hash)
|
||||
assert.Equal(t, types.MsgStatus(l1Message.Status), types.MsgSubmitted)
|
||||
|
||||
relayTx, _, err := l2Client.TransactionByHash(context.Background(), common.HexToHash(l1Message.Layer2Hash))
|
||||
assert.NoError(t, err)
|
||||
relayTxReceipt, err := bind.WaitMined(context.Background(), l2Client, relayTx)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, len(relayTxReceipt.Logs), 1)
|
||||
|
||||
// fetch message relayed events
|
||||
l2Watcher.FetchContractEvent()
|
||||
msg, err = l1MessageOrm.GetL1MessageByQueueIndex(nonce.Uint64())
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, types.MsgStatus(msg.Status), types.MsgConfirmed)
|
||||
}
|
||||
@@ -8,7 +8,6 @@ import (
|
||||
|
||||
_ "scroll-tech/bridge/cmd/event_watcher/app"
|
||||
_ "scroll-tech/bridge/cmd/gas_oracle/app"
|
||||
_ "scroll-tech/bridge/cmd/msg_relayer/app"
|
||||
_ "scroll-tech/bridge/cmd/rollup_relayer/app"
|
||||
|
||||
"scroll-tech/common/database"
|
||||
@@ -23,7 +22,6 @@ func testProcessStart(t *testing.T) {
|
||||
|
||||
bridgeApp.RunApp(t, cutils.EventWatcherApp)
|
||||
bridgeApp.RunApp(t, cutils.GasOracleApp)
|
||||
bridgeApp.RunApp(t, cutils.MessageRelayerApp)
|
||||
bridgeApp.RunApp(t, cutils.RollupRelayerApp)
|
||||
|
||||
bridgeApp.WaitExit()
|
||||
@@ -43,11 +41,6 @@ func testProcessStartEnableMetrics(t *testing.T) {
|
||||
svrPort = strconv.FormatInt(port.Int64()+50000, 10)
|
||||
bridgeApp.RunApp(t, cutils.GasOracleApp, "--metrics", "--metrics.addr", "localhost", "--metrics.port", svrPort)
|
||||
|
||||
port, err = rand.Int(rand.Reader, big.NewInt(2000))
|
||||
assert.NoError(t, err)
|
||||
svrPort = strconv.FormatInt(port.Int64()+50000, 10)
|
||||
bridgeApp.RunApp(t, cutils.MessageRelayerApp, "--metrics", "--metrics.addr", "localhost", "--metrics.port", svrPort)
|
||||
|
||||
port, err = rand.Int(rand.Reader, big.NewInt(2000))
|
||||
assert.NoError(t, err)
|
||||
svrPort = strconv.FormatInt(port.Int64()+50000, 10)
|
||||
|
||||
@@ -5,7 +5,6 @@ import (
|
||||
"math/big"
|
||||
"testing"
|
||||
|
||||
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
|
||||
"github.com/scroll-tech/go-ethereum/common"
|
||||
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
|
||||
"github.com/stretchr/testify/assert"
|
||||
@@ -13,6 +12,7 @@ import (
|
||||
"scroll-tech/common/database"
|
||||
"scroll-tech/common/types"
|
||||
"scroll-tech/common/types/message"
|
||||
"scroll-tech/common/utils"
|
||||
|
||||
"scroll-tech/bridge/internal/config"
|
||||
"scroll-tech/bridge/internal/controller/relayer"
|
||||
@@ -28,12 +28,12 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
|
||||
|
||||
// Create L2Relayer
|
||||
l2Cfg := bridgeApp.Config.L2Config
|
||||
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, false)
|
||||
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, false, nil)
|
||||
assert.NoError(t, err)
|
||||
|
||||
// Create L1Watcher
|
||||
l1Cfg := bridgeApp.Config.L1Config
|
||||
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
|
||||
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db, nil)
|
||||
|
||||
// add some blocks to db
|
||||
var wrappedBlocks []*types.WrappedBlock
|
||||
@@ -57,18 +57,20 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
|
||||
assert.NoError(t, err)
|
||||
|
||||
cp := watcher.NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
|
||||
MaxTxGasPerChunk: 1000000000,
|
||||
MaxL2TxNumPerChunk: 10000,
|
||||
MaxTxNumPerChunk: 10000,
|
||||
MaxL1CommitGasPerChunk: 50000000000,
|
||||
MaxL1CommitCalldataSizePerChunk: 1000000,
|
||||
MinL1CommitCalldataSizePerChunk: 0,
|
||||
MaxRowConsumptionPerChunk: 1048319,
|
||||
ChunkTimeoutSec: 300,
|
||||
}, db)
|
||||
}, db, nil)
|
||||
cp.TryProposeChunk()
|
||||
|
||||
batchOrm := orm.NewBatch(db)
|
||||
unbatchedChunkIndex, err := batchOrm.GetFirstUnbatchedChunkIndex(context.Background())
|
||||
assert.NoError(t, err)
|
||||
|
||||
chunkOrm := orm.NewChunk(db)
|
||||
chunks, err := chunkOrm.GetUnbatchedChunks(context.Background())
|
||||
chunks, err := chunkOrm.GetChunksGEIndex(context.Background(), unbatchedChunkIndex, 0)
|
||||
assert.NoError(t, err)
|
||||
assert.Len(t, chunks, 1)
|
||||
|
||||
@@ -76,14 +78,12 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
|
||||
MaxChunkNumPerBatch: 10,
|
||||
MaxL1CommitGasPerBatch: 50000000000,
|
||||
MaxL1CommitCalldataSizePerBatch: 1000000,
|
||||
MinChunkNumPerBatch: 1,
|
||||
BatchTimeoutSec: 300,
|
||||
}, db)
|
||||
}, db, nil)
|
||||
bp.TryProposeBatch()
|
||||
|
||||
l2Relayer.ProcessPendingBatches()
|
||||
|
||||
batchOrm := orm.NewBatch(db)
|
||||
batch, err := batchOrm.GetLatestBatch(context.Background())
|
||||
assert.NoError(t, err)
|
||||
assert.NotNil(t, batch)
|
||||
@@ -91,20 +91,22 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
|
||||
assert.NotEmpty(t, batch.CommitTxHash)
|
||||
assert.Equal(t, types.RollupCommitting, types.RollupStatus(batch.RollupStatus))
|
||||
|
||||
assert.NoError(t, err)
|
||||
commitTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(batch.CommitTxHash))
|
||||
assert.NoError(t, err)
|
||||
commitTxReceipt, err := bind.WaitMined(context.Background(), l1Client, commitTx)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, len(commitTxReceipt.Logs), 1)
|
||||
success := utils.TryTimes(30, func() bool {
|
||||
var receipt *gethTypes.Receipt
|
||||
receipt, err = l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.CommitTxHash))
|
||||
return err == nil && receipt.Status == 1
|
||||
})
|
||||
assert.True(t, success)
|
||||
|
||||
// fetch rollup events
|
||||
err = l1Watcher.FetchContractEvent()
|
||||
assert.NoError(t, err)
|
||||
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(statuses))
|
||||
assert.Equal(t, types.RollupCommitted, statuses[0])
|
||||
success = utils.TryTimes(30, func() bool {
|
||||
err = l1Watcher.FetchContractEvent()
|
||||
assert.NoError(t, err)
|
||||
var statuses []types.RollupStatus
|
||||
statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash})
|
||||
return err == nil && len(statuses) == 1 && types.RollupCommitted == statuses[0]
|
||||
})
|
||||
assert.True(t, success)
|
||||
|
||||
// add dummy proof
|
||||
proof := &message.BatchProof{
|
||||
@@ -118,7 +120,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
|
||||
// process committed batch and check status
|
||||
l2Relayer.ProcessCommittedBatches()
|
||||
|
||||
statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash})
|
||||
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(statuses))
|
||||
assert.Equal(t, types.RollupFinalizing, statuses[0])
|
||||
@@ -128,17 +130,20 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
|
||||
assert.NotNil(t, batch)
|
||||
assert.NotEmpty(t, batch.FinalizeTxHash)
|
||||
|
||||
finalizeTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(batch.FinalizeTxHash))
|
||||
assert.NoError(t, err)
|
||||
finalizeTxReceipt, err := bind.WaitMined(context.Background(), l1Client, finalizeTx)
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, len(finalizeTxReceipt.Logs), 1)
|
||||
success = utils.TryTimes(30, func() bool {
|
||||
var receipt *gethTypes.Receipt
|
||||
receipt, err = l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.FinalizeTxHash))
|
||||
return err == nil && receipt.Status == 1
|
||||
})
|
||||
assert.True(t, success)
|
||||
|
||||
// fetch rollup events
|
||||
err = l1Watcher.FetchContractEvent()
|
||||
assert.NoError(t, err)
|
||||
statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash})
|
||||
assert.NoError(t, err)
|
||||
assert.Equal(t, 1, len(statuses))
|
||||
assert.Equal(t, types.RollupFinalized, statuses[0])
|
||||
success = utils.TryTimes(30, func() bool {
|
||||
err = l1Watcher.FetchContractEvent()
|
||||
assert.NoError(t, err)
|
||||
var statuses []types.RollupStatus
|
||||
statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash})
|
||||
return err == nil && len(statuses) == 1 && types.RollupFinalized == statuses[0]
|
||||
})
|
||||
assert.True(t, success)
|
||||
}
|
||||
|
||||
@@ -37,7 +37,7 @@ COPY . .
|
||||
RUN cp -r ./common/libzkp/interface ./coordinator/internal/logic/verifier/lib
|
||||
COPY --from=zkp-builder /app/target/release/libzkp.so ./coordinator/internal/logic/verifier/lib/
|
||||
COPY --from=zkp-builder /app/target/release/libzktrie.so ./coordinator/internal/logic/verifier/lib/
|
||||
RUN cd ./coordinator && go build -v -p 4 -o /bin/coordinator ./cmd && mv internal/logic/verifier/lib /bin/
|
||||
RUN cd ./coordinator && make coordinator_skip_libzkp && mv ./build/bin/coordinator /bin/coordinator && mv internal/logic/verifier/lib /bin/
|
||||
|
||||
# Pull coordinator into a second stage deploy alpine container
|
||||
FROM ubuntu:20.04
|
||||
@@ -46,6 +46,6 @@ ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/src/coordinator/internal/logic/verifier/li
|
||||
RUN mkdir -p /src/coordinator/internal/logic/verifier/lib
|
||||
COPY --from=builder /bin/lib /src/coordinator/internal/logic/verifier/lib
|
||||
COPY --from=builder /bin/coordinator /bin/
|
||||
|
||||
RUN /bin/coordinator --version
|
||||
|
||||
ENTRYPOINT ["/bin/coordinator"]
|
||||
|
||||
@@ -1,5 +0,0 @@
|
||||
assets/
|
||||
docs/
|
||||
l2geth/
|
||||
rpc-gateway/
|
||||
*target/*
|
||||
@@ -11,18 +11,21 @@ COPY ./prover-stats-api/go.* ./prover-stats-api/
|
||||
COPY ./prover/go.* ./prover/
|
||||
COPY ./tests/integration-test/go.* ./tests/integration-test/
|
||||
COPY ./bridge-history-api/go.* ./bridge-history-api/
|
||||
# Support mainland environment.
|
||||
#ENV GOPROXY="https://goproxy.cn,direct"
|
||||
RUN go mod download -x
|
||||
|
||||
# Build msg_relayer
|
||||
|
||||
# Build prover-stats-api
|
||||
FROM base as builder
|
||||
|
||||
RUN --mount=target=. \
|
||||
--mount=type=cache,target=/root/.cache/go-build \
|
||||
cd /src/bridge/cmd/msg_relayer/ && go build -v -p 4 -o /bin/msg_relayer
|
||||
cd /src/prover-stats-api/cmd/ && go build -v -p 4 -o /bin/prover-stats-api
|
||||
|
||||
# Pull msg_relayer into a second stage deploy alpine container
|
||||
# Pull prover-stats-api into a second stage deploy alpine container
|
||||
FROM alpine:latest
|
||||
|
||||
COPY --from=builder /bin/msg_relayer /bin/
|
||||
COPY --from=builder /bin/prover-stats-api /bin/
|
||||
|
||||
ENTRYPOINT ["msg_relayer"]
|
||||
ENTRYPOINT ["prover-stats-api"]
|
||||
@@ -5,8 +5,9 @@ import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"sync"
|
||||
"sync/atomic"
|
||||
|
||||
"github.com/docker/docker/pkg/reexec"
|
||||
cmap "github.com/orcaman/concurrent-map"
|
||||
)
|
||||
|
||||
@@ -26,8 +27,9 @@ type Cmd struct {
|
||||
name string
|
||||
args []string
|
||||
|
||||
mu sync.Mutex
|
||||
cmd *exec.Cmd
|
||||
isRunning uint64
|
||||
cmd *exec.Cmd
|
||||
app *exec.Cmd
|
||||
|
||||
checkFuncs cmap.ConcurrentMap //map[string]checkFunc
|
||||
|
||||
@@ -38,13 +40,23 @@ type Cmd struct {
|
||||
}
|
||||
|
||||
// NewCmd creates a Cmd instance.
|
||||
func NewCmd(name string, args ...string) *Cmd {
|
||||
return &Cmd{
|
||||
func NewCmd(name string, params ...string) *Cmd {
|
||||
cmd := &Cmd{
|
||||
checkFuncs: cmap.New(),
|
||||
name: name,
|
||||
args: args,
|
||||
args: params,
|
||||
ErrChan: make(chan error, 10),
|
||||
cmd: exec.Command(name, params...),
|
||||
app: &exec.Cmd{
|
||||
Path: reexec.Self(),
|
||||
Args: append([]string{name}, params...),
|
||||
},
|
||||
}
|
||||
cmd.cmd.Stdout = cmd
|
||||
cmd.cmd.Stderr = cmd
|
||||
cmd.app.Stdout = cmd
|
||||
cmd.app.Stderr = cmd
|
||||
return cmd
|
||||
}
|
||||
|
||||
// RegistFunc registers a check func.
|
||||
@@ -58,15 +70,14 @@ func (c *Cmd) UnRegistFunc(key string) {
|
||||
}
|
||||
|
||||
func (c *Cmd) runCmd() {
|
||||
cmd := exec.Command(c.args[0], c.args[1:]...) //nolint:gosec
|
||||
cmd.Stdout = c
|
||||
cmd.Stderr = c
|
||||
c.ErrChan <- cmd.Run()
|
||||
fmt.Println("cmd:", append([]string{c.name}, c.args...))
|
||||
if atomic.CompareAndSwapUint64(&c.isRunning, 0, 1) {
|
||||
c.ErrChan <- c.cmd.Run()
|
||||
}
|
||||
}
|
||||
|
||||
// RunCmd parallel running when parallel is true.
|
||||
func (c *Cmd) RunCmd(parallel bool) {
|
||||
fmt.Println("cmd:", c.args)
|
||||
if parallel {
|
||||
go c.runCmd()
|
||||
} else {
|
||||
|
||||
@@ -3,41 +3,45 @@ package cmd
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strings"
|
||||
"sync/atomic"
|
||||
"syscall"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/pkg/reexec"
|
||||
"github.com/stretchr/testify/assert"
|
||||
)
|
||||
|
||||
// IsRunning reports whether the process is running: 1 started, 0 not started.
|
||||
func (c *Cmd) IsRunning() bool {
|
||||
return atomic.LoadUint64(&c.isRunning) == 1
|
||||
}
|
||||
|
||||
func (c *Cmd) runApp() {
|
||||
fmt.Println("cmd:", append([]string{c.name}, c.args...))
|
||||
if atomic.CompareAndSwapUint64(&c.isRunning, 0, 1) {
|
||||
c.ErrChan <- c.app.Run()
|
||||
}
|
||||
}
|
||||
|
||||
// RunApp exec's the current binary using name as argv[0] which will trigger the
|
||||
// reexec init function for that name (e.g. "geth-test" in cmd/geth/run_test.go)
|
||||
func (c *Cmd) RunApp(waitResult func() bool) {
|
||||
fmt.Println("cmd: ", append([]string{c.name}, c.args...))
|
||||
cmd := &exec.Cmd{
|
||||
Path: reexec.Self(),
|
||||
Args: append([]string{c.name}, c.args...),
|
||||
Stderr: c,
|
||||
Stdout: c,
|
||||
}
|
||||
if waitResult != nil {
|
||||
go func() {
|
||||
_ = cmd.Run()
|
||||
c.runApp()
|
||||
}()
|
||||
waitResult()
|
||||
} else {
|
||||
_ = cmd.Run()
|
||||
c.runApp()
|
||||
}
|
||||
|
||||
c.mu.Lock()
|
||||
c.cmd = cmd
|
||||
c.mu.Unlock()
|
||||
}
|
||||
|
||||
// WaitExit wait util process exit.
|
||||
func (c *Cmd) WaitExit() {
|
||||
if atomic.LoadUint64(&c.isRunning) == 0 {
|
||||
return
|
||||
}
|
||||
// Wait until all the check functions have finished; interrupt the loop when an error appears.
|
||||
var err error
|
||||
for err == nil && !c.checkFuncs.IsEmpty() {
|
||||
@@ -52,20 +56,18 @@ func (c *Cmd) WaitExit() {
|
||||
}
|
||||
|
||||
// Send interrupt signal.
|
||||
c.mu.Lock()
|
||||
_ = c.cmd.Process.Signal(os.Interrupt)
|
||||
// should use `_ = c.cmd.Process.Wait()` here, but we have some bugs in coordinator's graceful exit,
|
||||
_ = c.app.Process.Signal(os.Interrupt)
|
||||
// should use `_ = c.app.Process.Wait()` here, but we have some bugs in coordinator's graceful exit,
|
||||
// so we use `Kill` as a temporary workaround. Since `WaitExit` is only used in integration tests,
// it won't really affect our functionalities.
|
||||
_ = c.cmd.Process.Kill()
|
||||
c.mu.Unlock()
|
||||
if err = c.app.Process.Signal(syscall.SIGTERM); err != nil {
|
||||
_ = c.app.Process.Kill()
|
||||
}
|
||||
}
|
||||
|
||||
// Interrupt send interrupt signal.
|
||||
func (c *Cmd) Interrupt() {
|
||||
c.mu.Lock()
|
||||
c.ErrChan <- c.cmd.Process.Signal(os.Interrupt)
|
||||
c.mu.Unlock()
|
||||
c.ErrChan <- c.app.Process.Signal(os.Interrupt)
|
||||
}
|
||||
|
||||
// WaitResult return true when get the keyword during timeout.
|
||||
|
||||
@@ -12,7 +12,7 @@ import (
|
||||
)
|
||||
|
||||
func TestCmd(t *testing.T) {
|
||||
app := cmd.NewCmd("curTime", "date", "+%Y-%m-%d")
|
||||
app := cmd.NewCmd("date", "+%Y-%m-%d")
|
||||
|
||||
tm := time.Now()
|
||||
curTime := fmt.Sprintf("%d-%02d-%02d", tm.Year(), tm.Month(), tm.Day())
|
||||
|
||||
@@ -27,6 +27,7 @@ var (
|
||||
|
||||
// AppAPI app interface.
|
||||
type AppAPI interface {
|
||||
IsRunning() bool
|
||||
WaitResult(t *testing.T, timeout time.Duration, keyword string) bool
|
||||
RunApp(waitResult func() bool)
|
||||
WaitExit()
|
||||
|
||||
@@ -36,7 +36,7 @@ func NewImgDB(image, password, dbName string, port int) ImgInstance {
|
||||
dbName: dbName,
|
||||
port: port,
|
||||
}
|
||||
img.cmd = cmd.NewCmd(img.name, img.prepare()...)
|
||||
img.cmd = cmd.NewCmd("docker", img.prepare()...)
|
||||
return img
|
||||
}
|
||||
|
||||
@@ -89,7 +89,7 @@ func (i *ImgDB) IsRunning() bool {
|
||||
}
|
||||
|
||||
func (i *ImgDB) prepare() []string {
|
||||
cmd := []string{"docker", "run", "--rm", "--name", i.name, "-p", fmt.Sprintf("%d:5432", i.port)}
|
||||
cmd := []string{"run", "--rm", "--name", i.name, "-p", fmt.Sprintf("%d:5432", i.port)}
|
||||
envs := []string{
|
||||
"-e", "POSTGRES_PASSWORD=" + i.password,
|
||||
"-e", fmt.Sprintf("POSTGRES_DB=%s", i.dbName),
|
||||
|
||||
@@ -42,7 +42,7 @@ func NewImgGeth(image, volume, ipc string, hPort, wPort int) GethImgInstance {
|
||||
httpPort: hPort,
|
||||
wsPort: wPort,
|
||||
}
|
||||
img.cmd = cmd.NewCmd(img.name, img.prepare()...)
|
||||
img.cmd = cmd.NewCmd("docker", img.params()...)
|
||||
return img
|
||||
}
|
||||
|
||||
@@ -149,8 +149,8 @@ func (i *ImgGeth) Stop() error {
|
||||
return cli.ContainerRemove(ctx, i.id, types.ContainerRemoveOptions{})
|
||||
}
|
||||
|
||||
func (i *ImgGeth) prepare() []string {
|
||||
cmds := []string{"docker", "run", "--rm", "--name", i.name}
|
||||
func (i *ImgGeth) params() []string {
|
||||
cmds := []string{"run", "--rm", "--name", i.name}
|
||||
var ports []string
|
||||
if i.httpPort != 0 {
|
||||
ports = append(ports, []string{"-p", strconv.Itoa(i.httpPort) + ":8545"}...)
|
||||
|
||||
File diff suppressed because one or more lines are too long
@@ -1,4 +1,4 @@
|
||||
FROM scrolltech/l2geth:scroll-v4.3.18
|
||||
FROM scrolltech/l2geth:scroll-v4.3.55
|
||||
|
||||
RUN mkdir -p /l2geth/keystore
|
||||
|
||||
|
||||
@@ -3,14 +3,18 @@ module scroll-tech/common
|
||||
go 1.19
|
||||
|
||||
require (
|
||||
github.com/bits-and-blooms/bitset v1.7.0
|
||||
github.com/docker/docker v23.0.6+incompatible
|
||||
github.com/gin-contrib/pprof v1.4.0
|
||||
github.com/gin-gonic/gin v1.9.1
|
||||
github.com/jmoiron/sqlx v1.3.5
|
||||
github.com/lib/pq v1.10.9
|
||||
github.com/mattn/go-colorable v0.1.13
|
||||
github.com/mattn/go-isatty v0.0.19
|
||||
github.com/modern-go/reflect2 v1.0.2
|
||||
github.com/orcaman/concurrent-map v1.0.0
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230804022247-26eeb40ea3ca
|
||||
github.com/prometheus/client_golang v1.14.0
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230829000527-f883dcdc21fc
|
||||
github.com/stretchr/testify v1.8.3
|
||||
github.com/urfave/cli/v2 v2.25.7
|
||||
gorm.io/driver/postgres v1.5.0
|
||||
@@ -21,8 +25,11 @@ require (
|
||||
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.1 // indirect
|
||||
github.com/VictoriaMetrics/fastcache v1.6.0 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/btcsuite/btcd v0.20.1-beta // indirect
|
||||
github.com/bytedance/sonic v1.9.2 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.2.0 // indirect
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
|
||||
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
|
||||
github.com/davecgh/go-spew v1.1.1 // indirect
|
||||
github.com/deckarep/golang-set v1.8.0 // indirect
|
||||
@@ -33,15 +40,20 @@ require (
|
||||
github.com/edsrzf/mmap-go v1.0.0 // indirect
|
||||
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect
|
||||
github.com/fsnotify/fsnotify v1.6.0 // indirect
|
||||
github.com/gabriel-vasile/mimetype v1.4.2 // indirect
|
||||
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
|
||||
github.com/gin-contrib/sse v0.1.0 // indirect
|
||||
github.com/go-kit/kit v0.9.0 // indirect
|
||||
github.com/go-logfmt/logfmt v0.5.1 // indirect
|
||||
github.com/go-ole/go-ole v1.2.6 // indirect
|
||||
github.com/go-playground/locales v0.14.1 // indirect
|
||||
github.com/go-playground/universal-translator v0.18.1 // indirect
|
||||
github.com/go-playground/validator/v10 v10.14.1 // indirect
|
||||
github.com/go-sql-driver/mysql v1.7.1 // indirect
|
||||
github.com/go-stack/stack v1.8.1 // indirect
|
||||
github.com/goccy/go-json v0.10.2 // indirect
|
||||
github.com/gogo/protobuf v1.3.2 // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
|
||||
github.com/google/go-cmp v0.5.9 // indirect
|
||||
github.com/google/uuid v1.3.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.0 // indirect
|
||||
github.com/graph-gophers/graphql-go v1.3.0 // indirect
|
||||
@@ -60,13 +72,17 @@ require (
|
||||
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
|
||||
github.com/jinzhu/inflection v1.0.0 // indirect
|
||||
github.com/jinzhu/now v1.1.5 // indirect
|
||||
github.com/julienschmidt/httprouter v1.3.0 // indirect
|
||||
github.com/json-iterator/go v1.1.12 // indirect
|
||||
github.com/klauspost/cpuid/v2 v2.2.5 // indirect
|
||||
github.com/kr/pretty v0.3.1 // indirect
|
||||
github.com/leodido/go-urn v1.2.4 // indirect
|
||||
github.com/mattn/go-runewidth v0.0.14 // indirect
|
||||
github.com/mattn/go-sqlite3 v1.14.16 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
|
||||
github.com/mitchellh/mapstructure v1.5.0 // indirect
|
||||
github.com/mitchellh/pointerstructure v1.2.0 // indirect
|
||||
github.com/moby/term v0.5.0 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/morikuni/aec v1.0.0 // indirect
|
||||
github.com/olekukonko/tablewriter v0.0.5 // indirect
|
||||
github.com/onsi/ginkgo v1.16.5 // indirect
|
||||
@@ -74,9 +90,13 @@ require (
|
||||
github.com/opencontainers/go-digest v1.0.0 // indirect
|
||||
github.com/opencontainers/image-spec v1.1.0-rc3 // indirect
|
||||
github.com/opentracing/opentracing-go v1.1.0 // indirect
|
||||
github.com/pelletier/go-toml/v2 v2.0.8 // indirect
|
||||
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 // indirect
|
||||
github.com/pkg/errors v0.9.1 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/prometheus/client_model v0.3.0 // indirect
|
||||
github.com/prometheus/common v0.39.0 // indirect
|
||||
github.com/prometheus/procfs v0.9.0 // indirect
|
||||
github.com/prometheus/tsdb v0.7.1 // indirect
|
||||
github.com/rivo/uniseg v0.4.4 // indirect
|
||||
github.com/rjeczalik/notify v0.9.1 // indirect
|
||||
@@ -89,17 +109,21 @@ require (
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
|
||||
github.com/tklauser/go-sysconf v0.3.11 // indirect
|
||||
github.com/tklauser/numcpus v0.6.0 // indirect
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
|
||||
github.com/tyler-smith/go-bip39 v1.1.0 // indirect
|
||||
github.com/ugorji/go/codec v1.2.11 // indirect
|
||||
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
|
||||
github.com/yusufpapurcu/wmi v1.2.2 // indirect
|
||||
golang.org/x/crypto v0.11.0 // indirect
|
||||
golang.org/x/arch v0.4.0 // indirect
|
||||
golang.org/x/crypto v0.12.0 // indirect
|
||||
golang.org/x/mod v0.12.0 // indirect
|
||||
golang.org/x/net v0.12.0 // indirect
|
||||
golang.org/x/net v0.14.0 // indirect
|
||||
golang.org/x/sync v0.3.0 // indirect
|
||||
golang.org/x/sys v0.10.0 // indirect
|
||||
golang.org/x/text v0.11.0 // indirect
|
||||
golang.org/x/sys v0.11.0 // indirect
|
||||
golang.org/x/text v0.12.0 // indirect
|
||||
golang.org/x/time v0.3.0 // indirect
|
||||
golang.org/x/tools v0.11.0 // indirect
|
||||
google.golang.org/protobuf v1.31.0 // indirect
|
||||
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
|
||||
gopkg.in/urfave/cli.v1 v1.20.0 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
|
||||
107
common/go.sum
@@ -38,6 +38,10 @@ github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo
|
||||
github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/bits-and-blooms/bitset v1.7.0 h1:YjAGVd3XmtK9ktAbX8Zg2g2PwLIMjGREZJHlV4j7NEo=
|
||||
github.com/bits-and-blooms/bitset v1.7.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
|
||||
github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c=
|
||||
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
|
||||
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
|
||||
@@ -49,6 +53,9 @@ github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVa
|
||||
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
|
||||
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
|
||||
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
|
||||
github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM=
|
||||
github.com/bytedance/sonic v1.9.2 h1:GDaNjuWSGu09guE9Oql0MSTNhNCLlWwO8y/xM5BzcbM=
|
||||
github.com/bytedance/sonic v1.9.2/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U=
|
||||
github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
|
||||
@@ -56,6 +63,9 @@ github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghf
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
|
||||
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams=
|
||||
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
@@ -97,11 +107,20 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo
|
||||
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
|
||||
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
|
||||
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
|
||||
github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
|
||||
github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
|
||||
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
|
||||
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
|
||||
github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
|
||||
github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
|
||||
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
|
||||
github.com/gin-contrib/pprof v1.4.0 h1:XxiBSf5jWZ5i16lNOPbMTVdgHBdhfGRD5PZ1LWazzvg=
|
||||
github.com/gin-contrib/pprof v1.4.0/go.mod h1:RrehPJasUVBPK6yTUwOl8/NP6i0vbUgmxtis+Z5KE90=
|
||||
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
|
||||
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
|
||||
github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk=
|
||||
github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg=
|
||||
github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU=
|
||||
github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
|
||||
github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
|
||||
github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs=
|
||||
@@ -113,11 +132,21 @@ github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
|
||||
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
|
||||
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
|
||||
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
|
||||
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
|
||||
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
|
||||
github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
|
||||
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
|
||||
github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs=
|
||||
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
|
||||
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
|
||||
github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA=
|
||||
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
|
||||
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
|
||||
github.com/go-playground/validator/v10 v10.10.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXSGrTK4nAUsbPlLADvpJkos=
|
||||
github.com/go-playground/validator/v10 v10.14.1 h1:9c50NUPC30zyuKprjL3vNZ0m5oG+jU0zvx4AqHGnv4k=
|
||||
github.com/go-playground/validator/v10 v10.14.1/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
|
||||
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
|
||||
github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
|
||||
@@ -126,6 +155,9 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me
|
||||
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
|
||||
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
|
||||
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
|
||||
github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
|
||||
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
|
||||
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
|
||||
github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
|
||||
@@ -142,12 +174,16 @@ github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFU
|
||||
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
|
||||
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
|
||||
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
|
||||
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
|
||||
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
|
||||
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
|
||||
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
|
||||
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
|
||||
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
|
||||
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
|
||||
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
|
||||
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
|
||||
@@ -163,7 +199,7 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
|
||||
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
|
||||
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
|
||||
@@ -230,13 +266,14 @@ github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
|
||||
github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
|
||||
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
|
||||
github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o=
|
||||
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
|
||||
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
|
||||
github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
|
||||
github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0=
|
||||
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
|
||||
@@ -245,6 +282,9 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
|
||||
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
|
||||
github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
|
||||
github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
|
||||
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
|
||||
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
|
||||
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
|
||||
github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
@@ -261,6 +301,9 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
|
||||
github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg=
|
||||
github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
|
||||
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
|
||||
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
|
||||
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
|
||||
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
|
||||
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
|
||||
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
|
||||
@@ -278,6 +321,7 @@ github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx
|
||||
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
|
||||
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
|
||||
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
|
||||
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
|
||||
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
|
||||
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
|
||||
@@ -291,6 +335,8 @@ github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwp
|
||||
github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
|
||||
github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
|
||||
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
@@ -298,6 +344,8 @@ github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjU
|
||||
github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4=
|
||||
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
|
||||
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
||||
@@ -334,6 +382,9 @@ github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFSt
|
||||
github.com/orcaman/concurrent-map v1.0.0 h1:I/2A2XPCb4IuQWcQhBhSwGfiuybl/J0ev9HDbW65HOY=
|
||||
github.com/orcaman/concurrent-map v1.0.0/go.mod h1:Lu3tH6HLW3feq74c2GC+jIMS/K2CFcDWnWD9XkenwhI=
|
||||
github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE=
|
||||
github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
|
||||
github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ=
|
||||
github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4=
|
||||
github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc=
|
||||
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 h1:oYW+YCJ1pachXTQmzR3rNLYGGz4g/UgFcjb28p/viDM=
|
||||
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0=
|
||||
@@ -349,14 +400,22 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
|
||||
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
|
||||
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
|
||||
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
|
||||
github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI=
|
||||
github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
|
||||
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
|
||||
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
|
||||
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
|
||||
github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc=
|
||||
@@ -367,6 +426,7 @@ github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeC
|
||||
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
|
||||
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
|
||||
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
|
||||
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
|
||||
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
|
||||
@@ -374,8 +434,8 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
|
||||
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
|
||||
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
|
||||
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230804022247-26eeb40ea3ca h1:aQKsL9FiCPB6Eo3nuyxmw6aZpAhmFvJsRb7XGtXnOUc=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230804022247-26eeb40ea3ca/go.mod h1:DiN3p2inoXOxGffxSswDKqWjQ7bU+Mp0c9v0XQXKmaA=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230829000527-f883dcdc21fc h1:eK3NOpjgm/b2TQ6rYqWx92Zri0kBuxf6gKjjsVxWKr8=
|
||||
github.com/scroll-tech/go-ethereum v1.10.14-0.20230829000527-f883dcdc21fc/go.mod h1:DiN3p2inoXOxGffxSswDKqWjQ7bU+Mp0c9v0XQXKmaA=
|
||||
github.com/scroll-tech/zktrie v0.6.0 h1:xLrMAO31Yo2BiPg1jtYKzcjpEFnXy8acbB7iIsyshPs=
|
||||
github.com/scroll-tech/zktrie v0.6.0/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
|
||||
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
|
||||
@@ -401,10 +461,12 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf
|
||||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
|
||||
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
|
||||
github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY=
|
||||
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
|
||||
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
|
||||
@@ -414,8 +476,14 @@ github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+Kd
|
||||
github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI=
|
||||
github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms=
|
||||
github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4=
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
|
||||
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
|
||||
github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
|
||||
github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U=
|
||||
github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M=
|
||||
github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY=
|
||||
github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU=
|
||||
github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
|
||||
github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs=
|
||||
github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
|
||||
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
|
||||
@@ -436,6 +504,9 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
|
||||
golang.org/x/arch v0.4.0 h1:A8WCeEWhLwPBKNbFi5Wv5UTCBx5zzubnXDlMOFAzFMc=
|
||||
golang.org/x/arch v0.4.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
|
||||
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
@@ -445,10 +516,11 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
|
||||
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
|
||||
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
|
||||
golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA=
|
||||
golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
|
||||
golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk=
|
||||
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
|
||||
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
|
||||
@@ -504,8 +576,8 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v
|
||||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
|
||||
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
|
||||
golang.org/x/net v0.12.0 h1:cfawfvKITfUsFCeJIHJrbSxpeu/E81khclypR0GVT50=
|
||||
golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
|
||||
golang.org/x/net v0.14.0 h1:BONx9s002vGdD9umnlX1Po8vOZmrgH34qlHcD1MfK14=
|
||||
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
@@ -560,6 +632,8 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
@@ -567,8 +641,8 @@ golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBc
|
||||
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
|
||||
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
|
||||
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
@@ -578,10 +652,11 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3
|
||||
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4=
|
||||
golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc=
|
||||
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
@@ -668,6 +743,11 @@ google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQ
|
||||
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
|
||||
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
|
||||
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
|
||||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
|
||||
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
@@ -688,6 +768,7 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
|
||||
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
gorm.io/driver/postgres v1.5.0 h1:u2FXTy14l45qc3UeCJ7QaAXZmZfDDv0YrthvmRq1l0U=
|
||||
|
||||
43
common/libzkp/impl/Cargo.lock
generated
@@ -32,13 +32,14 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "aggregator"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.10#87cae118ffdcf3a085a7c3c24268f7a0df21fcd4"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
|
||||
dependencies = [
|
||||
"ark-std",
|
||||
"env_logger 0.10.0",
|
||||
"eth-types",
|
||||
"ethers-core",
|
||||
"halo2_proofs",
|
||||
"hex",
|
||||
"itertools",
|
||||
"log",
|
||||
"rand",
|
||||
@@ -432,7 +433,7 @@ checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1"
|
||||
[[package]]
|
||||
name = "bus-mapping"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.10#87cae118ffdcf3a085a7c3c24268f7a0df21fcd4"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
|
||||
dependencies = [
|
||||
"eth-types",
|
||||
"ethers-core",
|
||||
@@ -1048,7 +1049,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "eth-types"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.10#87cae118ffdcf3a085a7c3c24268f7a0df21fcd4"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
|
||||
dependencies = [
|
||||
"ethers-core",
|
||||
"ethers-signers",
|
||||
@@ -1225,7 +1226,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "external-tracer"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.10#87cae118ffdcf3a085a7c3c24268f7a0df21fcd4"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
|
||||
dependencies = [
|
||||
"eth-types",
|
||||
"geth-utils",
|
||||
@@ -1438,7 +1439,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "gadgets"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.10#87cae118ffdcf3a085a7c3c24268f7a0df21fcd4"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
|
||||
dependencies = [
|
||||
"digest 0.7.6",
|
||||
"eth-types",
|
||||
@@ -1478,7 +1479,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "geth-utils"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.10#87cae118ffdcf3a085a7c3c24268f7a0df21fcd4"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
|
||||
dependencies = [
|
||||
"env_logger 0.9.3",
|
||||
"gobuild 0.1.0-alpha.2 (git+https://github.com/scroll-tech/gobuild.git)",
|
||||
@@ -1582,7 +1583,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "halo2-base"
|
||||
version = "0.2.2"
|
||||
source = "git+https://github.com/scroll-tech/halo2-lib?branch=develop#2c225864227e74b207d9f4b9e08c4d5f1afc69a1"
|
||||
source = "git+https://github.com/scroll-tech/halo2-lib?tag=v0.1.0#2c225864227e74b207d9f4b9e08c4d5f1afc69a1"
|
||||
dependencies = [
|
||||
"ff",
|
||||
"halo2_proofs",
|
||||
@@ -1597,7 +1598,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "halo2-ecc"
|
||||
version = "0.2.2"
|
||||
source = "git+https://github.com/scroll-tech/halo2-lib?branch=develop#2c225864227e74b207d9f4b9e08c4d5f1afc69a1"
|
||||
source = "git+https://github.com/scroll-tech/halo2-lib?tag=v0.1.0#2c225864227e74b207d9f4b9e08c4d5f1afc69a1"
|
||||
dependencies = [
|
||||
"ff",
|
||||
"group",
|
||||
@@ -1632,7 +1633,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "halo2-mpt-circuits"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/scroll-tech/mpt-circuit.git?branch=v0.5#1c11b6c9b1245073a76c3ce7100b6798060f7cb8"
|
||||
source = "git+https://github.com/scroll-tech/mpt-circuit.git?tag=v0.5.1#2163a9c436ed85363c954ecf7e6e1044a1b991dc"
|
||||
dependencies = [
|
||||
"ethers-core",
|
||||
"halo2_proofs",
|
||||
@@ -1654,7 +1655,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "halo2_proofs"
|
||||
version = "0.2.0"
|
||||
source = "git+https://github.com/scroll-tech/halo2.git?branch=develop#4d8a405a97ce445c88fc8ec40f9a2dd0a661c697"
|
||||
source = "git+https://github.com/scroll-tech/halo2.git?branch=develop#19de67c07a9b9b567580466763f93ebfbc3bb799"
|
||||
dependencies = [
|
||||
"ark-std",
|
||||
"blake2b_simd",
|
||||
@@ -2076,7 +2077,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "keccak256"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.10#87cae118ffdcf3a085a7c3c24268f7a0df21fcd4"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
|
||||
dependencies = [
|
||||
"env_logger 0.9.3",
|
||||
"eth-types",
|
||||
@@ -2263,7 +2264,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "mock"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.10#87cae118ffdcf3a085a7c3c24268f7a0df21fcd4"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
|
||||
dependencies = [
|
||||
"eth-types",
|
||||
"ethers-core",
|
||||
@@ -2278,7 +2279,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "mpt-zktrie"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.10#87cae118ffdcf3a085a7c3c24268f7a0df21fcd4"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
|
||||
dependencies = [
|
||||
"bus-mapping",
|
||||
"eth-types",
|
||||
@@ -2753,8 +2754,8 @@ dependencies = [
|
||||
|
||||
[[package]]
|
||||
name = "prover"
|
||||
version = "0.4.0"
|
||||
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.5.10#cb8f71475e6aa9fc78a24a832369fecb1c7d2201"
|
||||
version = "0.7.5"
|
||||
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.7.5#9699d40940aed2f14d8e1958167d714bca2c9984"
|
||||
dependencies = [
|
||||
"aggregator",
|
||||
"anyhow",
|
||||
@@ -3623,7 +3624,7 @@ checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9"
|
||||
[[package]]
|
||||
name = "snark-verifier"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/scroll-tech/snark-verifier?branch=develop#12c306ec57849921e690221b10b8a08189868d4a"
|
||||
source = "git+https://github.com/scroll-tech/snark-verifier?tag=v0.1.2#4466059ce9a6dfaf26455e4ffb61d72af775cf52"
|
||||
dependencies = [
|
||||
"bytes",
|
||||
"ethereum-types 0.14.1",
|
||||
@@ -3647,7 +3648,7 @@ dependencies = [
|
||||
[[package]]
|
||||
name = "snark-verifier-sdk"
|
||||
version = "0.0.1"
|
||||
source = "git+https://github.com/scroll-tech/snark-verifier?branch=develop#12c306ec57849921e690221b10b8a08189868d4a"
|
||||
source = "git+https://github.com/scroll-tech/snark-verifier?tag=v0.1.2#4466059ce9a6dfaf26455e4ffb61d72af775cf52"
|
||||
dependencies = [
|
||||
"bincode",
|
||||
"env_logger 0.10.0",
|
||||
@@ -4038,8 +4039,8 @@ checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba"
|
||||
|
||||
[[package]]
|
||||
name = "types"
|
||||
version = "0.4.0"
|
||||
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.5.10#cb8f71475e6aa9fc78a24a832369fecb1c7d2201"
|
||||
version = "0.7.5"
|
||||
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.7.5#9699d40940aed2f14d8e1958167d714bca2c9984"
|
||||
dependencies = [
|
||||
"base64 0.13.1",
|
||||
"blake2",
|
||||
@@ -4490,10 +4491,11 @@ checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9"
|
||||
[[package]]
|
||||
name = "zkevm-circuits"
|
||||
version = "0.1.0"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.5.10#87cae118ffdcf3a085a7c3c24268f7a0df21fcd4"
|
||||
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.5#aa9a9aff698a5b253d1f3c29ea3d3006364777bf"
|
||||
dependencies = [
|
||||
"array-init",
|
||||
"bus-mapping",
|
||||
"either",
|
||||
"env_logger 0.9.3",
|
||||
"eth-types",
|
||||
"ethers-core",
|
||||
@@ -4534,6 +4536,7 @@ dependencies = [
|
||||
name = "zkp"
|
||||
version = "0.1.0"
|
||||
dependencies = [
|
||||
"base64 0.13.1",
|
||||
"env_logger 0.9.3",
|
||||
"halo2_proofs",
|
||||
"libc",
|
||||
|
||||
common/libzkp/impl/Cargo.toml

@@ -20,18 +20,18 @@ maingate = { git = "https://github.com/scroll-tech/halo2wrong", branch = "halo2-
halo2curves = { git = "https://github.com/scroll-tech/halo2curves.git", branch = "0.3.1-derive-serde" }

[dependencies]
prover = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.5.10" }
types = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.5.10" }
prover = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.7.5" }
types = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.7.5" }
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "develop" }

log = "0.4"
base64 = "0.13.0"
env_logger = "0.9.0"
libc = "0.2"
log = "0.4"
once_cell = "1.8.0"
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0.66"
libc = "0.2"
once_cell = "1.8.0"

[profile.test]
opt-level = 3
common/libzkp/impl/src/batch.rs

@@ -1,11 +1,14 @@
use crate::utils::{c_char_to_str, c_char_to_vec, vec_to_c_char, OUTPUT_DIR};
use crate::{
    types::{CheckChunkProofsResponse, ProofResult},
    utils::{c_char_to_str, c_char_to_vec, string_to_c_char, vec_to_c_char, OUTPUT_DIR},
};
use libc::c_char;
use prover::{
    aggregator::{Prover, Verifier},
    utils::{chunk_trace_to_witness_block, init_env_and_log},
    BatchProof, ChunkHash, ChunkProof,
};
use std::{cell::OnceCell, panic, ptr::null};
use std::{cell::OnceCell, env, panic, ptr::null};
use types::eth::BlockTrace;

static mut PROVER: OnceCell<Prover> = OnceCell::new();
@@ -13,11 +16,15 @@ static mut VERIFIER: OnceCell<Verifier> = OnceCell::new();

/// # Safety
#[no_mangle]
pub unsafe extern "C" fn init_batch_prover(params_dir: *const c_char) {
pub unsafe extern "C" fn init_batch_prover(params_dir: *const c_char, assets_dir: *const c_char) {
    init_env_and_log("ffi_batch_prove");

    let params_dir = c_char_to_str(params_dir);
    let prover = Prover::from_params_dir(params_dir);
    let assets_dir = c_char_to_str(assets_dir);

    // TODO: add a setting in scroll-prover.
    env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir);
    let prover = Prover::from_dirs(params_dir, assets_dir);

    PROVER.set(prover).unwrap();
}
@@ -30,39 +37,104 @@ pub unsafe extern "C" fn init_batch_verifier(params_dir: *const c_char, assets_d
    let params_dir = c_char_to_str(params_dir);
    let assets_dir = c_char_to_str(assets_dir);

    // TODO: add a setting in scroll-prover.
    env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir);
    let verifier = Verifier::from_dirs(params_dir, assets_dir);

    VERIFIER.set(verifier).unwrap();
}

/// # Safety
#[no_mangle]
pub unsafe extern "C" fn get_batch_vk() -> *const c_char {
    let vk_result = panic::catch_unwind(|| PROVER.get_mut().unwrap().get_vk());

    vk_result
        .ok()
        .flatten()
        .map_or(null(), |vk| string_to_c_char(base64::encode(vk)))
}
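get_batch_vk (and the later get_chunk_vk) returns the verifying key as a base64 string, or NULL when the prover is uninitialized or the call panics. A minimal Go-side sketch of decoding that value once it has been copied out of C memory; the key below is a stand-in, not real data, and the surrounding program is illustrative only:

package main

import (
	"encoding/base64"
	"fmt"
)

func main() {
	// Stand-in for the base64 string returned by get_batch_vk(); the Rust side
	// uses base64::encode (standard alphabet with padding), which matches
	// Go's base64.StdEncoding.
	vkB64 := "AAECAwQ="
	vk, err := base64.StdEncoding.DecodeString(vkB64)
	if err != nil {
		panic(err)
	}
	fmt.Printf("verifying key: %d bytes\n", len(vk))
}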
|
||||
/// # Safety
|
||||
#[no_mangle]
|
||||
pub unsafe extern "C" fn check_chunk_proofs(chunk_proofs: *const c_char) -> *const c_char {
|
||||
let check_result: Result<bool, String> = panic::catch_unwind(|| {
|
||||
let chunk_proofs = c_char_to_vec(chunk_proofs);
|
||||
let chunk_proofs = serde_json::from_slice::<Vec<ChunkProof>>(&chunk_proofs)
|
||||
.map_err(|e| format!("failed to deserialize chunk proofs: {e:?}"))?;
|
||||
|
||||
if chunk_proofs.is_empty() {
|
||||
return Err("provided chunk proofs are empty.".to_string());
|
||||
}
|
||||
|
||||
let prover_ref = PROVER.get().expect("failed to get reference to PROVER.");
|
||||
|
||||
let valid = prover_ref.check_chunk_proofs(&chunk_proofs);
|
||||
Ok(valid)
|
||||
})
|
||||
.unwrap_or_else(|e| Err(format!("unwind error: {e:?}")));
|
||||
|
||||
let r = match check_result {
|
||||
Ok(valid) => CheckChunkProofsResponse {
|
||||
ok: valid,
|
||||
error: None,
|
||||
},
|
||||
Err(err) => CheckChunkProofsResponse {
|
||||
ok: false,
|
||||
error: Some(err),
|
||||
},
|
||||
};
|
||||
|
||||
serde_json::to_vec(&r).map_or(null(), vec_to_c_char)
|
||||
}
|
||||
|
||||
/// # Safety
|
||||
#[no_mangle]
|
||||
pub unsafe extern "C" fn gen_batch_proof(
|
||||
chunk_hashes: *const c_char,
|
||||
chunk_proofs: *const c_char,
|
||||
) -> *const c_char {
|
||||
let chunk_hashes = c_char_to_vec(chunk_hashes);
|
||||
let chunk_proofs = c_char_to_vec(chunk_proofs);
|
||||
let proof_result: Result<Vec<u8>, String> = panic::catch_unwind(|| {
|
||||
let chunk_hashes = c_char_to_vec(chunk_hashes);
|
||||
let chunk_proofs = c_char_to_vec(chunk_proofs);
|
||||
|
||||
let chunk_hashes = serde_json::from_slice::<Vec<ChunkHash>>(&chunk_hashes).unwrap();
|
||||
let chunk_proofs = serde_json::from_slice::<Vec<ChunkProof>>(&chunk_proofs).unwrap();
|
||||
assert_eq!(chunk_hashes.len(), chunk_proofs.len());
|
||||
let chunk_hashes = serde_json::from_slice::<Vec<ChunkHash>>(&chunk_hashes)
|
||||
.map_err(|e| format!("failed to deserialize chunk hashes: {e:?}"))?;
|
||||
let chunk_proofs = serde_json::from_slice::<Vec<ChunkProof>>(&chunk_proofs)
|
||||
.map_err(|e| format!("failed to deserialize chunk proofs: {e:?}"))?;
|
||||
|
||||
let chunk_hashes_proofs = chunk_hashes
|
||||
.into_iter()
|
||||
.zip(chunk_proofs.into_iter())
|
||||
.collect();
|
||||
if chunk_hashes.len() != chunk_proofs.len() {
|
||||
return Err(format!("chunk hashes and chunk proofs lengths mismatch: chunk_hashes.len() = {}, chunk_proofs.len() = {}",
|
||||
chunk_hashes.len(), chunk_proofs.len()));
|
||||
}
|
||||
|
||||
let chunk_hashes_proofs = chunk_hashes
|
||||
.into_iter()
|
||||
.zip(chunk_proofs.into_iter())
|
||||
.collect();
|
||||
|
||||
let proof_result = panic::catch_unwind(|| {
|
||||
let proof = PROVER
|
||||
.get_mut()
|
||||
.unwrap()
|
||||
.expect("failed to get mutable reference to PROVER.")
|
||||
.gen_agg_evm_proof(chunk_hashes_proofs, None, OUTPUT_DIR.as_deref())
|
||||
.unwrap();
|
||||
.map_err(|e| format!("failed to generate proof: {e:?}"))?;
|
||||
|
||||
serde_json::to_vec(&proof).unwrap()
|
||||
});
|
||||
proof_result.map_or(null(), vec_to_c_char)
|
||||
serde_json::to_vec(&proof).map_err(|e| format!("failed to serialize the proof: {e:?}"))
|
||||
})
|
||||
.unwrap_or_else(|e| Err(format!("unwind error: {e:?}")));
|
||||
|
||||
let r = match proof_result {
|
||||
Ok(proof_bytes) => ProofResult {
|
||||
message: Some(proof_bytes),
|
||||
error: None,
|
||||
},
|
||||
Err(err) => ProofResult {
|
||||
message: None,
|
||||
error: Some(err),
|
||||
},
|
||||
};
|
||||
|
||||
serde_json::to_vec(&r).map_or(null(), vec_to_c_char)
|
||||
}
|
||||
|
||||
/// # Safety
|
||||
|
||||
common/libzkp/impl/src/chunk.rs

@@ -1,11 +1,14 @@
use crate::utils::{c_char_to_str, c_char_to_vec, vec_to_c_char, OUTPUT_DIR};
use crate::{
    types::ProofResult,
    utils::{c_char_to_str, c_char_to_vec, string_to_c_char, vec_to_c_char, OUTPUT_DIR},
};
use libc::c_char;
use prover::{
    utils::init_env_and_log,
    zkevm::{Prover, Verifier},
    ChunkProof,
};
use std::{cell::OnceCell, panic, ptr::null};
use std::{cell::OnceCell, env, panic, ptr::null};
use types::eth::BlockTrace;

static mut PROVER: OnceCell<Prover> = OnceCell::new();
@@ -13,10 +16,14 @@ static mut VERIFIER: OnceCell<Verifier> = OnceCell::new();

/// # Safety
#[no_mangle]
pub unsafe extern "C" fn init_chunk_prover(params_dir: *const c_char) {
pub unsafe extern "C" fn init_chunk_prover(params_dir: *const c_char, assets_dir: *const c_char) {
    init_env_and_log("ffi_chunk_prove");

    let params_dir = c_char_to_str(params_dir);
    let assets_dir = c_char_to_str(assets_dir);

    // TODO: add a setting in scroll-prover.
    env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir);
    let prover = Prover::from_params_dir(params_dir);

    PROVER.set(prover).unwrap();
@@ -30,6 +37,8 @@ pub unsafe extern "C" fn init_chunk_verifier(params_dir: *const c_char, assets_d
    let params_dir = c_char_to_str(params_dir);
    let assets_dir = c_char_to_str(assets_dir);

    // TODO: add a setting in scroll-prover.
    env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir);
    let verifier = Verifier::from_dirs(params_dir, assets_dir);

    VERIFIER.set(verifier).unwrap();
@@ -37,21 +46,45 @@ pub unsafe extern "C" fn init_chunk_verifier(params_dir: *const c_char, assets_d
|
||||
|
||||
/// # Safety
|
||||
#[no_mangle]
|
||||
pub unsafe extern "C" fn gen_chunk_proof(block_traces: *const c_char) -> *const c_char {
|
||||
let block_traces = c_char_to_vec(block_traces);
|
||||
let block_traces = serde_json::from_slice::<Vec<BlockTrace>>(&block_traces).unwrap();
|
||||
pub unsafe extern "C" fn get_chunk_vk() -> *const c_char {
|
||||
let vk_result = panic::catch_unwind(|| PROVER.get_mut().unwrap().get_vk());
|
||||
|
||||
vk_result
|
||||
.ok()
|
||||
.flatten()
|
||||
.map_or(null(), |vk| string_to_c_char(base64::encode(vk)))
|
||||
}
|
||||
|
||||
/// # Safety
|
||||
#[no_mangle]
|
||||
pub unsafe extern "C" fn gen_chunk_proof(block_traces: *const c_char) -> *const c_char {
|
||||
let proof_result: Result<Vec<u8>, String> = panic::catch_unwind(|| {
|
||||
let block_traces = c_char_to_vec(block_traces);
|
||||
let block_traces = serde_json::from_slice::<Vec<BlockTrace>>(&block_traces)
|
||||
.map_err(|e| format!("failed to deserialize block traces: {e:?}"))?;
|
||||
|
||||
let proof_result = panic::catch_unwind(|| {
|
||||
let proof = PROVER
|
||||
.get_mut()
|
||||
.unwrap()
|
||||
.expect("failed to get mutable reference to PROVER.")
|
||||
.gen_chunk_proof(block_traces, None, OUTPUT_DIR.as_deref())
|
||||
.unwrap();
|
||||
.map_err(|e| format!("failed to generate proof: {e:?}"))?;
|
||||
|
||||
serde_json::to_vec(&proof).unwrap()
|
||||
});
|
||||
serde_json::to_vec(&proof).map_err(|e| format!("failed to serialize the proof: {e:?}"))
|
||||
})
|
||||
.unwrap_or_else(|e| Err(format!("unwind error: {e:?}")));
|
||||
|
||||
proof_result.map_or(null(), vec_to_c_char)
|
||||
let r = match proof_result {
|
||||
Ok(proof_bytes) => ProofResult {
|
||||
message: Some(proof_bytes),
|
||||
error: None,
|
||||
},
|
||||
Err(err) => ProofResult {
|
||||
message: None,
|
||||
error: Some(err),
|
||||
},
|
||||
};
|
||||
|
||||
serde_json::to_vec(&r).map_or(null(), vec_to_c_char)
|
||||
}
|
||||
|
||||
/// # Safety
|
||||
|
||||
@@ -2,4 +2,5 @@
|
||||
|
||||
mod batch;
|
||||
mod chunk;
|
||||
mod types;
|
||||
mod utils;
|
||||
|
||||
22 common/libzkp/impl/src/types.rs Normal file
@@ -0,0 +1,22 @@
use serde::{Deserialize, Serialize};

// Represents the result of a chunk proof checking operation.
// `ok` indicates whether the proof checking was successful.
// `error` provides additional details in case the check failed.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct CheckChunkProofsResponse {
    pub ok: bool,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub error: Option<String>,
}

// Encapsulates the result from generating a proof.
// `message` holds the generated proof in byte slice format.
// `error` provides additional details in case the proof generation failed.
#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct ProofResult {
    #[serde(skip_serializing_if = "Option::is_none")]
    pub message: Option<Vec<u8>>,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub error: Option<String>,
}
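These two structs define the JSON that the FFI functions now hand back to the host process. A minimal sketch of decoding ProofResult on the Go side follows; the struct and helper names are illustrative assumptions, not code from the repository (note that serde_json encodes Vec<u8> as an array of numbers, so the sketch decodes into a numeric slice first):

package main

import (
    "encoding/json"
    "fmt"
)

// proofResult mirrors the Rust ProofResult struct: message carries the proof
// bytes on success, error carries a failure description.
type proofResult struct {
    Message []int  `json:"message,omitempty"`
    Error   string `json:"error,omitempty"`
}

func decodeProofResult(raw []byte) ([]byte, error) {
    var r proofResult
    if err := json.Unmarshal(raw, &r); err != nil {
        return nil, fmt.Errorf("malformed proof result: %w", err)
    }
    if r.Error != "" {
        return nil, fmt.Errorf("prover returned error: %s", r.Error)
    }
    proof := make([]byte, len(r.Message))
    for i, b := range r.Message {
        proof[i] = byte(b)
    }
    return proof, nil
}

func main() {
    // Example payloads in the shape the Rust side would emit.
    ok := []byte(`{"message":[1,2,3]}`)
    bad := []byte(`{"error":"failed to generate proof"}`)

    if proof, err := decodeProofResult(ok); err == nil {
        fmt.Println("proof bytes:", proof)
    }
    if _, err := decodeProofResult(bad); err != nil {
        fmt.Println("got error:", err)
    }
}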
@@ -19,6 +19,10 @@ pub(crate) fn c_char_to_vec(c: *const c_char) -> Vec<u8> {
    cstr.to_bytes().to_vec()
}

pub(crate) fn string_to_c_char(string: String) -> *const c_char {
    CString::new(string).unwrap().into_raw()
}

pub(crate) fn vec_to_c_char(bytes: Vec<u8>) -> *const c_char {
    CString::new(bytes).unwrap().into_raw()
}
@@ -1,10 +1,13 @@
void init_batch_prover(char* params_dir);
void init_batch_prover(char* params_dir, char* assets_dir);
void init_batch_verifier(char* params_dir, char* assets_dir);
char* get_batch_vk();
char* check_chunk_proofs(char* chunk_proofs);
char* gen_batch_proof(char* chunk_hashes, char* chunk_proofs);
char verify_batch_proof(char* proof);

void init_chunk_prover(char* params_dir);
void init_chunk_prover(char* params_dir, char* assets_dir);
void init_chunk_verifier(char* params_dir, char* assets_dir);
char* get_chunk_vk();
char* gen_chunk_proof(char* block_traces);
char verify_chunk_proof(char* proof);
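This header is presumably consumed from Go through cgo, so the new two-argument initializers ripple into the Go wrappers as well. The following is only a rough sketch of such a wrapper; the cgo flags, package name, and file layout are assumptions for illustration, not the repository's actual build setup:

package libzkp

/*
#cgo LDFLAGS: -lzkp -lm -ldl
#include <stdlib.h>
#include "libzkp.h"
*/
import "C"

import "unsafe"

// InitChunkProver mirrors the updated C signature: both the params directory
// and the circuit assets directory are forwarded to the Rust side.
func InitChunkProver(paramsDir, assetsDir string) {
    cParams := C.CString(paramsDir)
    defer C.free(unsafe.Pointer(cParams))
    cAssets := C.CString(assetsDir)
    defer C.free(unsafe.Pointer(cAssets))
    C.init_chunk_prover(cParams, cAssets)
}

// GenChunkProof passes JSON-encoded block traces in and returns the raw JSON
// ProofResult emitted by the Rust side, or "" if a null pointer came back.
func GenChunkProof(blockTracesJSON []byte) string {
    cTraces := C.CString(string(blockTracesJSON))
    defer C.free(unsafe.Pointer(cTraces))
    out := C.gen_chunk_proof(cTraces)
    if out == nil {
        return ""
    }
    return C.GoString(out)
}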
57 common/metrics/ginmetrics/bloom.go Normal file
@@ -0,0 +1,57 @@
package ginmetrics

import (
    "github.com/bits-and-blooms/bitset"
)

const defaultSize = 2 << 24

var seeds = []uint{7, 11, 13, 31, 37, 61}

// BloomFilter a simple bloom filter
type BloomFilter struct {
    Set   *bitset.BitSet
    Funcs [6]simpleHash
}

// NewBloomFilter new a BloomFilter
func NewBloomFilter() *BloomFilter {
    bf := new(BloomFilter)
    for i := 0; i < len(bf.Funcs); i++ {
        bf.Funcs[i] = simpleHash{defaultSize, seeds[i]}
    }
    bf.Set = bitset.New(defaultSize)
    return bf
}

// Add a value to BloomFilter
func (bf *BloomFilter) Add(value string) {
    for _, f := range bf.Funcs {
        bf.Set.Set(f.hash(value))
    }
}

// Contains check the value is in bloom filter
func (bf *BloomFilter) Contains(value string) bool {
    if value == "" {
        return false
    }
    ret := true
    for _, f := range bf.Funcs {
        ret = ret && bf.Set.Test(f.hash(value))
    }
    return ret
}

type simpleHash struct {
    Cap  uint
    Seed uint
}

func (s *simpleHash) hash(value string) uint {
    var result uint = 0
    for i := 0; i < len(value); i++ {
        result = result*s.Seed + uint(value[i])
    }
    return (s.Cap - 1) & result
}
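For context, a minimal sketch of how this filter behaves when tracking unique visitors, which is how the gin middleware further below uses it (the IP literal is made up for the example):

package main

import (
    "fmt"

    "scroll-tech/common/metrics/ginmetrics"
)

func main() {
    bf := ginmetrics.NewBloomFilter()

    // First sighting of an IP: not contained yet, so count it and add it.
    ip := "203.0.113.7"
    if !bf.Contains(ip) {
        fmt.Println("new visitor:", ip)
        bf.Add(ip)
    }

    // Second sighting: Contains now reports true. As with any bloom filter,
    // false positives are possible but false negatives are not.
    fmt.Println("seen before:", bf.Contains(ip)) // true
}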
89 common/metrics/ginmetrics/metric.go Normal file
@@ -0,0 +1,89 @@
package ginmetrics

import (
    "fmt"

    "github.com/prometheus/client_golang/prometheus"
)

// Metric defines a metric object. Users can use it to save
// metric data. Every metric should be globally unique by name.
type Metric struct {
    Type        MetricType
    Name        string
    Description string
    Labels      []string
    Buckets     []float64
    Objectives  map[float64]float64

    vec prometheus.Collector
}

// SetGaugeValue set data for Gauge type Metric.
func (m *Metric) SetGaugeValue(labelValues []string, value float64) error {
    if m.Type == None {
        return fmt.Errorf("metric %s not existed", m.Name)
    }

    if m.Type != Gauge {
        return fmt.Errorf("metric %s not Gauge type", m.Name)
    }
    m.vec.(*prometheus.GaugeVec).WithLabelValues(labelValues...).Set(value)
    return nil
}

// Inc increases value for Counter/Gauge type metric, increments
// the counter by 1
func (m *Metric) Inc(labelValues []string) error {
    if m.Type == None {
        return fmt.Errorf("metric %s not existed", m.Name)
    }

    if m.Type != Gauge && m.Type != Counter {
        return fmt.Errorf("metric %s not Gauge or Counter type", m.Name)
    }
    switch m.Type {
    case Counter:
        m.vec.(*prometheus.CounterVec).WithLabelValues(labelValues...).Inc()
    case Gauge:
        m.vec.(*prometheus.GaugeVec).WithLabelValues(labelValues...).Inc()
    }
    return nil
}

// Add adds the given value to the Metric object. Only
// for Counter/Gauge type metric.
func (m *Metric) Add(labelValues []string, value float64) error {
    if m.Type == None {
        return fmt.Errorf("metric %s not existed", m.Name)
    }

    if m.Type != Gauge && m.Type != Counter {
        return fmt.Errorf("metric %s not Gauge or Counter type", m.Name)
    }
    switch m.Type {
    case Counter:
        m.vec.(*prometheus.CounterVec).WithLabelValues(labelValues...).Add(value)
    case Gauge:
        m.vec.(*prometheus.GaugeVec).WithLabelValues(labelValues...).Add(value)
    }
    return nil
}

// Observe is used by Histogram and Summary type metric to
// add observations.
func (m *Metric) Observe(labelValues []string, value float64) error {
    if m.Type == 0 {
        return fmt.Errorf("metric %s not existed", m.Name)
    }
    if m.Type != Histogram && m.Type != Summary {
        return fmt.Errorf("metric %s not Histogram or Summary type", m.Name)
    }
    switch m.Type {
    case Histogram:
        m.vec.(*prometheus.HistogramVec).WithLabelValues(labelValues...).Observe(value)
    case Summary:
        m.vec.(*prometheus.SummaryVec).WithLabelValues(labelValues...).Observe(value)
    }
    return nil
}
155 common/metrics/ginmetrics/middleware.go Normal file
@@ -0,0 +1,155 @@
package ginmetrics

import (
    "fmt"
    "strconv"
    "time"

    "github.com/gin-gonic/gin"
    "github.com/prometheus/client_golang/prometheus/promhttp"
)

var (
    metricRequestTotal    = "request_total"
    metricRequestUVTotal  = "request_uv_total"
    metricURIRequestTotal = "uri_request_total"
    metricRequestBody     = "request_body_total"
    metricResponseBody    = "response_body_total"
    metricRequestDuration = "request_duration"
    metricSlowRequest     = "slow_request_total"

    bloomFilter *BloomFilter
)

// Use set gin metrics middleware
func (m *Monitor) Use(r gin.IRoutes) {
    m.initGinMetrics()

    r.Use(m.monitorInterceptor)
    r.GET(m.metricPath, func(ctx *gin.Context) {
        promhttp.Handler().ServeHTTP(ctx.Writer, ctx.Request)
    })
}

// UseWithoutExposingEndpoint is used to add monitor interceptor to gin router
// It can be called multiple times to intercept from multiple gin.IRoutes
// http path is not set, to do that use Expose function
func (m *Monitor) UseWithoutExposingEndpoint(r gin.IRoutes) {
    m.initGinMetrics()
    r.Use(m.monitorInterceptor)
}

// Expose adds metric path to a given router.
// The router can be different with the one passed to UseWithoutExposingEndpoint.
// This allows to expose metrics on different port.
func (m *Monitor) Expose(r gin.IRoutes) {
    r.GET(m.metricPath, func(ctx *gin.Context) {
        promhttp.Handler().ServeHTTP(ctx.Writer, ctx.Request)
    })
}

// initGinMetrics used to init gin metrics
func (m *Monitor) initGinMetrics() {
    bloomFilter = NewBloomFilter()

    _ = monitor.AddMetric(&Metric{
        Type:        Counter,
        Name:        metricRequestTotal,
        Description: "all the server received request num.",
        Labels:      nil,
    })
    _ = monitor.AddMetric(&Metric{
        Type:        Counter,
        Name:        metricRequestUVTotal,
        Description: "all the server received ip num.",
        Labels:      nil,
    })
    _ = monitor.AddMetric(&Metric{
        Type:        Counter,
        Name:        metricURIRequestTotal,
        Description: "all the server received request num with every uri.",
        Labels:      []string{"uri", "method", "code"},
    })
    _ = monitor.AddMetric(&Metric{
        Type:        Counter,
        Name:        metricRequestBody,
        Description: "the server received request body size, unit byte",
        Labels:      nil,
    })
    _ = monitor.AddMetric(&Metric{
        Type:        Counter,
        Name:        metricResponseBody,
        Description: "the server send response body size, unit byte",
        Labels:      nil,
    })
    _ = monitor.AddMetric(&Metric{
        Type:        Histogram,
        Name:        metricRequestDuration,
        Description: "the time server took to handle the request.",
        Labels:      []string{"uri"},
        Buckets:     m.reqDuration,
    })
    _ = monitor.AddMetric(&Metric{
        Type:        Counter,
        Name:        metricSlowRequest,
        Description: fmt.Sprintf("the server handled slow requests counter, t=%d.", m.slowTime),
        Labels:      []string{"uri", "method", "code"},
    })
}

// monitorInterceptor as gin monitor middleware.
func (m *Monitor) monitorInterceptor(ctx *gin.Context) {
    if ctx.Request.URL.Path == m.metricPath {
        ctx.Next()
        return
    }
    startTime := time.Now()

    // execute normal process.
    ctx.Next()

    // after request
    m.ginMetricHandle(ctx, startTime)
}

func (m *Monitor) ginMetricHandle(ctx *gin.Context, start time.Time) {
    r := ctx.Request
    w := ctx.Writer

    //set request total
    _ = m.GetMetric(metricRequestTotal).Inc(nil)

    // set uv
    if clientIP := ctx.ClientIP(); !bloomFilter.Contains(clientIP) {
        bloomFilter.Add(clientIP)
        _ = m.GetMetric(metricRequestUVTotal).Inc(nil)
    }

    errCode := strconv.Itoa(ctx.GetInt("errcode"))
    if len(errCode) == 0 {
        errCode = strconv.Itoa(w.Status())
    }

    // set uri request total
    _ = m.GetMetric(metricURIRequestTotal).Inc([]string{ctx.FullPath(), r.Method, errCode})

    // set request body size
    // since r.ContentLength can be negative (in some occasions) guard the operation
    if r.ContentLength >= 0 {
        _ = m.GetMetric(metricRequestBody).Add(nil, float64(r.ContentLength))
    }

    // set slow request
    latency := time.Since(start)
    if int32(latency.Seconds()) > m.slowTime {
        _ = m.GetMetric(metricSlowRequest).Inc([]string{ctx.FullPath(), r.Method, strconv.Itoa(w.Status())})
    }

    // set request duration
    _ = m.GetMetric(metricRequestDuration).Observe([]string{ctx.FullPath()}, latency.Seconds())

    // set response size
    if w.Size() > 0 {
        _ = m.GetMetric(metricResponseBody).Add(nil, float64(w.Size()))
    }
}
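To illustrate the split that the Expose comment describes, here is a minimal sketch of wiring the monitor into one router for interception and a second router for serving /metrics; the ports and the use of the default registerer are assumptions made only for this example:

package main

import (
    "github.com/gin-gonic/gin"
    "github.com/prometheus/client_golang/prometheus"

    "scroll-tech/common/metrics/ginmetrics"
)

func main() {
    m := ginmetrics.GetMonitor(prometheus.DefaultRegisterer)
    m.SetMetricPath("/metrics")
    m.SetSlowTime(2) // seconds before a request counts as "slow"

    // Business router: only the interceptor, no /metrics endpoint.
    api := gin.New()
    m.UseWithoutExposingEndpoint(api)
    api.GET("/ping", func(c *gin.Context) { c.String(200, "pong") })

    // Separate router (and port) that actually serves the metrics page.
    metricsRouter := gin.New()
    m.Expose(metricsRouter)

    go func() { _ = metricsRouter.Run(":9090") }() // assumed port
    _ = api.Run(":8080")                           // assumed port
}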
158 common/metrics/ginmetrics/types.go Normal file
@@ -0,0 +1,158 @@
package ginmetrics

import (
    "errors"
    "fmt"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promauto"
)

// MetricType define metric type
type MetricType int

const (
    // None unknown metric type
    None MetricType = iota
    // Counter MetricType
    Counter
    // Gauge MetricType
    Gauge
    // Histogram MetricType
    Histogram
    // Summary MetricType
    Summary

    defaultMetricPath = "/debug/metrics"
    defaultSlowTime   = int32(5)
)

var (
    defaultDuration = []float64{0.1, 0.3, 1.2, 5, 10}
    monitor         *Monitor

    promTypeHandler = map[MetricType]func(metric *Metric, reg prometheus.Registerer){
        Counter:   counterHandler,
        Gauge:     gaugeHandler,
        Histogram: histogramHandler,
        Summary:   summaryHandler,
    }
)

// Monitor is an object that uses to set gin server monitor.
type Monitor struct {
    slowTime    int32
    metricPath  string
    reqDuration []float64
    metrics     map[string]*Metric
    register    prometheus.Registerer
}

// GetMonitor used to get global Monitor object,
// this function returns a singleton object.
func GetMonitor(reg prometheus.Registerer) *Monitor {
    if monitor == nil {
        monitor = &Monitor{
            metricPath:  defaultMetricPath,
            slowTime:    defaultSlowTime,
            reqDuration: defaultDuration,
            metrics:     make(map[string]*Metric),
            register:    reg,
        }
    }
    return monitor
}

// GetMetric used to get metric object by metric_name.
func (m *Monitor) GetMetric(name string) *Metric {
    if metric, ok := m.metrics[name]; ok {
        return metric
    }
    return &Metric{}
}

// SetMetricPath set metricPath property. metricPath is used for Prometheus
// to get gin server monitoring data.
func (m *Monitor) SetMetricPath(path string) {
    m.metricPath = path
}

// SetSlowTime set slowTime property. slowTime is used to determine whether
// the request is slow. For "gin_slow_request_total" metric.
func (m *Monitor) SetSlowTime(slowTime int32) {
    m.slowTime = slowTime
}

// SetDuration set reqDuration property. reqDuration is used to ginRequestDuration
// metric buckets.
func (m *Monitor) SetDuration(duration []float64) {
    m.reqDuration = duration
}

// SetMetricPrefix set the metric prefix
func (m *Monitor) SetMetricPrefix(prefix string) {
    metricRequestTotal = prefix + metricRequestTotal
    metricRequestUVTotal = prefix + metricRequestUVTotal
    metricURIRequestTotal = prefix + metricURIRequestTotal
    metricRequestBody = prefix + metricRequestBody
    metricResponseBody = prefix + metricResponseBody
    metricRequestDuration = prefix + metricRequestDuration
    metricSlowRequest = prefix + metricSlowRequest
}

// SetMetricSuffix set the metric suffix
func (m *Monitor) SetMetricSuffix(suffix string) {
    metricRequestTotal += suffix
    metricRequestUVTotal += suffix
    metricURIRequestTotal += suffix
    metricRequestBody += suffix
    metricResponseBody += suffix
    metricRequestDuration += suffix
    metricSlowRequest += suffix
}

// AddMetric add custom monitor metric.
func (m *Monitor) AddMetric(metric *Metric) error {
    if _, ok := m.metrics[metric.Name]; ok {
        return fmt.Errorf("metric %s is existed", metric.Name)
    }

    if metric.Name == "" {
        return errors.New("metric name cannot be empty")
    }

    if f, ok := promTypeHandler[metric.Type]; ok {
        f(metric, m.register)
        m.metrics[metric.Name] = metric
    }

    return nil
}

func counterHandler(metric *Metric, register prometheus.Registerer) {
    metric.vec = promauto.With(register).NewCounterVec(
        prometheus.CounterOpts{Name: metric.Name, Help: metric.Description},
        metric.Labels,
    )
}

func gaugeHandler(metric *Metric, register prometheus.Registerer) {
    metric.vec = promauto.With(register).NewGaugeVec(
        prometheus.GaugeOpts{Name: metric.Name, Help: metric.Description},
        metric.Labels,
    )
}

func histogramHandler(metric *Metric, register prometheus.Registerer) {
    metric.vec = promauto.With(register).NewHistogramVec(
        prometheus.HistogramOpts{Name: metric.Name, Help: metric.Description, Buckets: metric.Buckets},
        metric.Labels,
    )
}

func summaryHandler(metric *Metric, register prometheus.Registerer) {
    promauto.With(register).NewSummaryVec(
        prometheus.SummaryOpts{Name: metric.Name, Help: metric.Description, Objectives: metric.Objectives},
        metric.Labels,
    )
}
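Beyond the built-in request metrics, AddMetric also accepts user-defined metrics. A small sketch follows; the metric name, label, and value are invented for illustration:

package main

import (
    "log"

    "github.com/prometheus/client_golang/prometheus"

    "scroll-tech/common/metrics/ginmetrics"
)

func main() {
    m := ginmetrics.GetMonitor(prometheus.DefaultRegisterer)

    // Register a gauge with one label; the name is illustrative only.
    if err := m.AddMetric(&ginmetrics.Metric{
        Type:        ginmetrics.Gauge,
        Name:        "pending_chunk_tasks",
        Description: "number of chunk proving tasks waiting to be assigned.",
        Labels:      []string{"queue"},
    }); err != nil {
        log.Fatal(err)
    }

    // Update it. GetMetric returns an empty Metric (and the setter errors)
    // if the name was never registered.
    if err := m.GetMetric("pending_chunk_tasks").SetGaugeValue([]string{"default"}, 42); err != nil {
        log.Fatal(err)
    }
}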
@@ -1,57 +0,0 @@
package metrics

import (
    "context"
    "net"
    "net/http"
    "strconv"

    "github.com/scroll-tech/go-ethereum/log"
    "github.com/scroll-tech/go-ethereum/metrics"
    "github.com/scroll-tech/go-ethereum/metrics/prometheus"
    "github.com/scroll-tech/go-ethereum/rpc"
    "github.com/urfave/cli/v2"

    "scroll-tech/common/utils"
)

var (
    // ScrollRegistry is used for scroll metrics.
    ScrollRegistry = metrics.NewRegistry()
)

// Serve starts the metrics server on the given address, will be closed when the given
// context is canceled.
func Serve(ctx context.Context, c *cli.Context) {
    if !c.Bool(utils.MetricsEnabled.Name) {
        return
    }

    address := net.JoinHostPort(
        c.String(utils.MetricsAddr.Name),
        strconv.Itoa(c.Int(utils.MetricsPort.Name)),
    )

    server := &http.Server{
        Addr:         address,
        Handler:      prometheus.Handler(ScrollRegistry),
        ReadTimeout:  rpc.DefaultHTTPTimeouts.ReadTimeout,
        WriteTimeout: rpc.DefaultHTTPTimeouts.WriteTimeout,
        IdleTimeout:  rpc.DefaultHTTPTimeouts.IdleTimeout,
    }

    go func() {
        <-ctx.Done()
        if err := server.Close(); err != nil {
            log.Error("Failed to close metrics server", "error", err)
        }
    }()

    log.Info("Starting metrics server", "address", address)

    go func() {
        if err := server.ListenAndServe(); err != nil {
            log.Error("start metrics server error", "error", err)
        }
    }()
}
18 common/metrics/middleware.go Normal file
@@ -0,0 +1,18 @@
package metrics

import (
    "github.com/gin-gonic/gin"
    "github.com/prometheus/client_golang/prometheus"

    "scroll-tech/common/metrics/ginmetrics"
)

// Use register the gin metric
func Use(router *gin.Engine, metricsPrefix string, reg prometheus.Registerer) {
    m := ginmetrics.GetMonitor(reg)
    m.SetMetricPath("/metrics")
    m.SetMetricPrefix(metricsPrefix + "_")
    m.SetSlowTime(1)
    m.SetDuration([]float64{0.025, .05, .1, .5, 1, 5, 10})
    m.UseWithoutExposingEndpoint(router)
}
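A sketch of calling this helper from a service's gin setup; the prefix string, registerer, and port below are placeholders, not values taken from the repository:

package main

import (
    "github.com/gin-gonic/gin"
    "github.com/prometheus/client_golang/prometheus"

    "scroll-tech/common/metrics"
)

func main() {
    router := gin.New()

    // Every metric emitted by the interceptor is now named "coordinator_*";
    // the /metrics endpoint itself is served separately (see common/metrics/server.go).
    metrics.Use(router, "coordinator", prometheus.DefaultRegisterer)

    router.GET("/healthz", func(c *gin.Context) { c.Status(200) })
    _ = router.Run(":8080") // assumed port
}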
49 common/metrics/server.go Normal file
@@ -0,0 +1,49 @@
package metrics

import (
    "errors"
    "fmt"
    "net/http"
    "time"

    // enable the pprof
    _ "net/http/pprof"

    "github.com/gin-contrib/pprof"
    "github.com/gin-gonic/gin"
    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"
    "github.com/scroll-tech/go-ethereum/log"
    "github.com/urfave/cli/v2"

    "scroll-tech/common/utils"
)

// Server starts the metrics server on the given address, will be closed when the given
// context is canceled.
func Server(c *cli.Context, reg *prometheus.Registry) {
    if !c.Bool(utils.MetricsEnabled.Name) {
        return
    }

    r := gin.New()
    r.Use(gin.Recovery())
    pprof.Register(r)
    r.GET("/metrics", func(context *gin.Context) {
        promhttp.Handler().ServeHTTP(context.Writer, context.Request)
    })

    address := fmt.Sprintf(":%s", c.String(utils.MetricsPort.Name))
    server := &http.Server{
        Addr:              address,
        Handler:           r,
        ReadHeaderTimeout: time.Minute,
    }
    log.Info("Starting metrics server", "address", address)

    go func() {
        if runServerErr := server.ListenAndServe(); runServerErr != nil && !errors.Is(runServerErr, http.ErrServerClosed) {
            log.Crit("run metrics http server failure", "error", runServerErr)
        }
    }()
}
@@ -10,6 +10,16 @@ import (
    "github.com/scroll-tech/go-ethereum/crypto"
)

// BatchMeta contains metadata of a batch.
type BatchMeta struct {
    StartChunkIndex           uint64
    StartChunkHash            string
    EndChunkIndex             uint64
    EndChunkHash              string
    TotalL1CommitGas          uint64
    TotalL1CommitCalldataSize uint32
}

// BatchHeader contains batch header info to be committed.
type BatchHeader struct {
    // Encoded in BatchHeaderV0Codec
@@ -3,20 +3,31 @@ package types
import (
    "encoding/binary"
    "errors"
    "fmt"
    "math"

    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/common/hexutil"
    "github.com/scroll-tech/go-ethereum/core/types"
    "github.com/scroll-tech/go-ethereum/log"
)

// CalldataNonZeroByteGas is the gas consumption per non zero byte in calldata.
const CalldataNonZeroByteGas = 16

// GetKeccak256Gas calculates keccak256 hash gas.
func GetKeccak256Gas(size uint64) uint64 {
    return 30 + 6*((size+31)/32) // 30 + 6 * ceil(size / 32)
}

// WrappedBlock contains the block's Header, Transactions and WithdrawTrieRoot hash.
type WrappedBlock struct {
    Header *types.Header `json:"header"`
    // Transactions is only used for recover types.Transactions, the from of types.TransactionData field is missing.
    Transactions []*types.TransactionData `json:"transactions"`
    WithdrawRoot common.Hash `json:"withdraw_trie_root,omitempty"`
    RowConsumption *types.RowConsumption `json:"row_consumption"`
    Transactions []*types.TransactionData `json:"transactions"`
    WithdrawRoot common.Hash `json:"withdraw_trie_root,omitempty"`
    RowConsumption *types.RowConsumption `json:"row_consumption"`
    txPayloadLengthCache map[string]uint64
}

// NumL1Messages returns the number of L1 messages in this block.
@@ -87,17 +98,15 @@ func (w *WrappedBlock) EstimateL1CommitCalldataSize() uint64 {
        if txData.Type == types.L1MessageTxType {
            continue
        }
        size += uint64(len(txData.Data))
        size += 4 // 4 bytes payload length
        size += w.getTxPayloadLength(txData)
    }
    size += 60 // 60 bytes BlockContext
    return size
}

// EstimateL1CommitGas calculates the total L1 commit gas for this block approximately.
func (w *WrappedBlock) EstimateL1CommitGas() uint64 {
    getKeccakGas := func(size uint64) uint64 {
        return 30 + 6*((size+31)/32) // 30 + 6 * ceil(size / 32)
    }

    var total uint64
    var numL1Messages uint64
    for _, txData := range w.Transactions {
@@ -106,25 +115,15 @@ func (w *WrappedBlock) EstimateL1CommitGas() uint64 {
            continue
        }

        data, _ := hexutil.Decode(txData.Data)
        tx := types.NewTx(&types.LegacyTx{
            Nonce:    txData.Nonce,
            To:       txData.To,
            Value:    txData.Value.ToInt(),
            Gas:      txData.Gas,
            GasPrice: txData.GasPrice.ToInt(),
            Data:     data,
            V:        txData.V.ToInt(),
            R:        txData.R.ToInt(),
            S:        txData.S.ToInt(),
        })
        rlpTxData, _ := tx.MarshalBinary()
        txPayloadLength := uint64(len(rlpTxData))
        total += 16 * txPayloadLength // an over-estimate: treat each byte as non-zero
        total += 16 * 4 // size of a uint32 field
        total += getKeccakGas(txPayloadLength) // l2 tx hash
        txPayloadLength := w.getTxPayloadLength(txData)
        total += CalldataNonZeroByteGas * txPayloadLength // an over-estimate: treat each byte as non-zero
        total += CalldataNonZeroByteGas * 4 // 4 bytes payload length
        total += GetKeccak256Gas(txPayloadLength) // l2 tx hash
    }

    // 60 bytes BlockContext calldata
    total += CalldataNonZeroByteGas * 60

    // sload
    total += 2100 * numL1Messages // numL1Messages times cold sload in L1MessageQueue

@@ -135,13 +134,47 @@ func (w *WrappedBlock) EstimateL1CommitGas() uint64 {
    return total
}

// L2TxsNum calculates the number of l2 txs.
func (w *WrappedBlock) L2TxsNum() uint64 {
    var count uint64
    for _, txData := range w.Transactions {
        if txData.Type != types.L1MessageTxType {
            count++
        }
func (w *WrappedBlock) getTxPayloadLength(txData *types.TransactionData) uint64 {
    if w.txPayloadLengthCache == nil {
        w.txPayloadLengthCache = make(map[string]uint64)
    }
    return count

    if length, exists := w.txPayloadLengthCache[txData.TxHash]; exists {
        return length
    }

    rlpTxData, err := convertTxDataToRLPEncoding(txData)
    if err != nil {
        log.Crit("convertTxDataToRLPEncoding failed, which should not happen", "hash", txData.TxHash, "err", err)
        return 0
    }
    txPayloadLength := uint64(len(rlpTxData))
    w.txPayloadLengthCache[txData.TxHash] = txPayloadLength
    return txPayloadLength
}

func convertTxDataToRLPEncoding(txData *types.TransactionData) ([]byte, error) {
    data, err := hexutil.Decode(txData.Data)
    if err != nil {
        return nil, fmt.Errorf("failed to decode txData.Data: %s, err: %w", txData.Data, err)
    }

    tx := types.NewTx(&types.LegacyTx{
        Nonce:    txData.Nonce,
        To:       txData.To,
        Value:    txData.Value.ToInt(),
        Gas:      txData.Gas,
        GasPrice: txData.GasPrice.ToInt(),
        Data:     data,
        V:        txData.V.ToInt(),
        R:        txData.R.ToInt(),
        S:        txData.S.ToInt(),
    })

    rlpTxData, err := tx.MarshalBinary()
    if err != nil {
        return nil, fmt.Errorf("failed to marshal binary of the tx: %+v, err: %w", tx, err)
    }

    return rlpTxData, nil
}
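To make the constants above concrete, here is a small worked sketch of the per-transaction terms in EstimateL1CommitGas for a hypothetical 200-byte RLP payload (the payload size is invented for the example):

package main

import "fmt"

// Same formulas as in the diff above.
const calldataNonZeroByteGas = 16

func getKeccak256Gas(size uint64) uint64 {
    return 30 + 6*((size+31)/32) // 30 + 6 * ceil(size / 32)
}

func main() {
    var txPayloadLength uint64 = 200 // hypothetical RLP-encoded tx size

    calldata := calldataNonZeroByteGas * txPayloadLength // 16 * 200 = 3200 (every byte treated as non-zero)
    lengthPrefix := uint64(calldataNonZeroByteGas * 4)   // 16 * 4 = 64 for the 4-byte payload-length field
    hashing := getKeccak256Gas(txPayloadLength)          // 30 + 6*ceil(200/32) = 30 + 6*7 = 72

    fmt.Println(calldata+lengthPrefix+hashing, "gas for this tx") // 3336
}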
@@ -8,7 +8,6 @@ import (
    "strings"

    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/common/hexutil"
    "github.com/scroll-tech/go-ethereum/core/types"
    "github.com/scroll-tech/go-ethereum/crypto"
)
@@ -65,23 +64,7 @@ func (c *Chunk) Encode(totalL1MessagePoppedBefore uint64) ([]byte, error) {
        if txData.Type == types.L1MessageTxType {
            continue
        }
        data, err := hexutil.Decode(txData.Data)
        if err != nil {
            return nil, err
        }
        // right now we only support legacy tx
        tx := types.NewTx(&types.LegacyTx{
            Nonce:    txData.Nonce,
            To:       txData.To,
            Value:    txData.Value.ToInt(),
            Gas:      txData.Gas,
            GasPrice: txData.GasPrice.ToInt(),
            Data:     data,
            V:        txData.V.ToInt(),
            R:        txData.R.ToInt(),
            S:        txData.S.ToInt(),
        })
        rlpTxData, err := tx.MarshalBinary()
        rlpTxData, err := convertTxDataToRLPEncoding(txData)
        if err != nil {
            return nil, err
        }
@@ -146,14 +129,10 @@ func (c *Chunk) EstimateL1CommitGas() uint64 {
    }

    numBlocks := uint64(len(c.Blocks))
    totalL1CommitGas += 100 * numBlocks // numBlocks times warm sload
    totalL1CommitGas += 16 // numBlocks field of chunk encoding in calldata
    totalL1CommitGas += 16 * 60 * numBlocks // BlockContext in chunk
    totalL1CommitGas += 100 * numBlocks // numBlocks times warm sload
    totalL1CommitGas += CalldataNonZeroByteGas // numBlocks field of chunk encoding in calldata
    totalL1CommitGas += CalldataNonZeroByteGas * numBlocks * 60 // numBlocks of BlockContext in chunk

    getKeccakGas := func(size uint64) uint64 {
        return 30 + 6*((size+31)/32) // 30 + 6 * ceil(size / 32)
    }

    totalL1CommitGas += getKeccakGas(58*numBlocks + 32*totalTxNum) // chunk hash
    totalL1CommitGas += GetKeccak256Gas(58*numBlocks + 32*totalTxNum) // chunk hash
    return totalL1CommitGas
}
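Similarly, the chunk-level overhead terms above can be worked through for a hypothetical single-block chunk containing two transactions; the block-level per-transaction gas from EstimateL1CommitGas is deliberately left out of this sketch:

package main

import "fmt"

func getKeccak256Gas(size uint64) uint64 {
    return 30 + 6*((size+31)/32)
}

func main() {
    const calldataNonZeroByteGas = 16
    var numBlocks, totalTxNum uint64 = 1, 2 // hypothetical single-block chunk with 2 txs

    var gas uint64
    gas += 100 * numBlocks                               // warm sload per block
    gas += calldataNonZeroByteGas                        // numBlocks field in calldata
    gas += calldataNonZeroByteGas * numBlocks * 60       // one 60-byte BlockContext per block
    gas += getKeccak256Gas(58*numBlocks + 32*totalTxNum) // chunk hash over 58*1 + 32*2 = 122 bytes -> 54 gas

    fmt.Println(gas) // 1130 gas of chunk-level overhead
}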
@@ -38,11 +38,15 @@ func TestChunkEncode(t *testing.T) {
    wrappedBlock := &WrappedBlock{}
    assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
    assert.Equal(t, uint64(0), wrappedBlock.NumL1Messages(0))
    assert.Equal(t, uint64(298), wrappedBlock.EstimateL1CommitCalldataSize())
    assert.Equal(t, uint64(2), wrappedBlock.NumL2Transactions())
    chunk = &Chunk{
        Blocks: []*WrappedBlock{
            wrappedBlock,
        },
    }
    assert.Equal(t, uint64(0), chunk.NumL1Messages(0))
    assert.Equal(t, uint64(6006), chunk.EstimateL1CommitGas())
    bytes, err = chunk.Encode(0)
    hexString := hex.EncodeToString(bytes)
    assert.NoError(t, err)
@@ -56,11 +60,15 @@ func TestChunkEncode(t *testing.T) {
    wrappedBlock2 := &WrappedBlock{}
    assert.NoError(t, json.Unmarshal(templateBlockTrace2, wrappedBlock2))
    assert.Equal(t, uint64(11), wrappedBlock2.NumL1Messages(0)) // 0..=9 skipped, 10 included
    assert.Equal(t, uint64(96), wrappedBlock2.EstimateL1CommitCalldataSize())
    assert.Equal(t, uint64(1), wrappedBlock2.NumL2Transactions())
    chunk = &Chunk{
        Blocks: []*WrappedBlock{
            wrappedBlock2,
        },
    }
    assert.Equal(t, uint64(11), chunk.NumL1Messages(0))
    assert.Equal(t, uint64(5002), chunk.EstimateL1CommitGas())
    bytes, err = chunk.Encode(0)
    hexString = hex.EncodeToString(bytes)
    assert.NoError(t, err)
@@ -75,6 +83,8 @@ func TestChunkEncode(t *testing.T) {
            wrappedBlock2,
        },
    }
    assert.Equal(t, uint64(11), chunk.NumL1Messages(0))
    assert.Equal(t, uint64(9958), chunk.EstimateL1CommitGas())
    bytes, err = chunk.Encode(0)
    hexString = hex.EncodeToString(bytes)
    assert.NoError(t, err)
@@ -136,3 +146,81 @@ func TestChunkHash(t *testing.T) {
    assert.NoError(t, err)
    assert.Equal(t, "0x2eb7dd63bf8fc29a0f8c10d16c2ae6f9da446907c79d50f5c164d30dc8526b60", hash.Hex())
}

func TestErrorPaths(t *testing.T) {
    // test 1: Header.Number is not a uint64
    templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
    assert.NoError(t, err)

    wrappedBlock := &WrappedBlock{}

    assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
    wrappedBlock.Header.Number = wrappedBlock.Header.Number.Lsh(wrappedBlock.Header.Number, 64)
    bytes, err := wrappedBlock.Encode(0)
    assert.Nil(t, bytes)
    assert.Error(t, err)
    assert.Contains(t, err.Error(), "block number is not uint64")

    assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
    for i := 0; i < 65537; i++ {
        wrappedBlock.Transactions = append(wrappedBlock.Transactions, wrappedBlock.Transactions[0])
    }

    bytes, err = wrappedBlock.Encode(0)
    assert.Nil(t, bytes)
    assert.Error(t, err)
    assert.Contains(t, err.Error(), "number of transactions exceeds max uint16")

    chunk := &Chunk{
        Blocks: []*WrappedBlock{
            wrappedBlock,
        },
    }

    bytes, err = chunk.Encode(0)
    assert.Nil(t, bytes)
    assert.Error(t, err)
    assert.Contains(t, err.Error(), "number of transactions exceeds max uint16")

    wrappedBlock.Transactions = wrappedBlock.Transactions[:1]
    wrappedBlock.Transactions[0].Data = "not-a-hex"
    bytes, err = chunk.Encode(0)
    assert.Nil(t, bytes)
    assert.Error(t, err)
    assert.Contains(t, err.Error(), "hex string without 0x prefix")

    assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
    wrappedBlock.Transactions[0].TxHash = "not-a-hex"
    _, err = chunk.Hash(0)
    assert.Error(t, err)
    assert.Contains(t, err.Error(), "invalid byte")

    templateBlockTrace2, err := os.ReadFile("../testdata/blockTrace_04.json")
    assert.NoError(t, err)

    wrappedBlock2 := &WrappedBlock{}
    assert.NoError(t, json.Unmarshal(templateBlockTrace2, wrappedBlock2))
    for i := 0; i < 65535; i++ {
        tx := &wrappedBlock2.Transactions[i]
        txCopy := *tx
        txCopy.Nonce = uint64(i + 1)
        wrappedBlock2.Transactions = append(wrappedBlock2.Transactions, txCopy)
    }

    bytes, err = wrappedBlock2.Encode(0)
    assert.Nil(t, bytes)
    assert.Error(t, err)
    assert.Contains(t, err.Error(), "number of L1 messages exceeds max uint16")

    chunk = &Chunk{
        Blocks: []*WrappedBlock{
            wrappedBlock2,
        },
    }

    bytes, err = chunk.Encode(0)
    assert.Nil(t, bytes)
    assert.Error(t, err)
    assert.Contains(t, err.Error(), "number of L1 messages exceeds max uint16")

}
@@ -126,8 +126,8 @@
    ProvingTaskUnassigned
    // ProvingTaskAssigned : proving_task is assigned to be proved
    ProvingTaskAssigned
    // ProvingTaskProved : proof has been returned by prover
    ProvingTaskProved
    // ProvingTaskProvedDEPRECATED DEPRECATED: proof has been returned by prover
    ProvingTaskProvedDEPRECATED
    // ProvingTaskVerified : proof is valid
    ProvingTaskVerified
    // ProvingTaskFailed : fail to generate proof
@@ -140,7 +140,7 @@ func (ps ProvingStatus) String() string {
        return "unassigned"
    case ProvingTaskAssigned:
        return "assigned"
    case ProvingTaskProved:
    case ProvingTaskProvedDEPRECATED:
        return "proved"
    case ProvingTaskVerified:
        return "verified"

@@ -58,8 +58,8 @@ func TestProvingStatus(t *testing.T) {
        "assigned",
    },
    {
        "ProvingTaskProved",
        ProvingTaskProved,
        "ProvingTaskProvedDEPRECATED",
        ProvingTaskProvedDEPRECATED,
        "proved",
    },
    {
@@ -13,6 +13,18 @@ import (
    "github.com/scroll-tech/go-ethereum/rlp"
)

// ProofFailureType the proof failure type
type ProofFailureType int

const (
    // ProofFailureUndefined the undefined type proof failure type
    ProofFailureUndefined ProofFailureType = iota
    // ProofFailurePanic proof failure for prover panic
    ProofFailurePanic
    // ProofFailureNoPanic proof failure for no prover panic
    ProofFailureNoPanic
)

// RespStatus represents status code from prover to scroll
type RespStatus uint32

@@ -199,6 +211,7 @@ func (a *ProofMsg) PublicKey() (string, error) {

// TaskMsg is a wrapper type around db ProveTask type.
type TaskMsg struct {
    UUID string `json:"uuid"`
    ID   string `json:"id"`
    Type ProofType `json:"type,omitempty"`
    BatchTaskDetail *BatchTaskDetail `json:"batch_task_detail,omitempty"`
@@ -255,6 +268,9 @@ type ChunkProof struct {
    Proof     []byte `json:"proof"`
    Instances []byte `json:"instances"`
    Vk        []byte `json:"vk"`
    // cross-reference between cooridinator computation and prover compution
    ChunkInfo  *ChunkInfo `json:"chunk_info,omitempty"`
    GitVersion string     `json:"git_version,omitempty"`
}

// BatchProof includes the proof info that are required for batch verification and rollup.
@@ -262,6 +278,8 @@ type BatchProof struct {
    Proof     []byte `json:"proof"`
    Instances []byte `json:"instances"`
    Vk        []byte `json:"vk"`
    // cross-reference between cooridinator computation and prover compution
    GitVersion string `json:"git_version,omitempty"`
}

// SanityCheck checks whether an BatchProof is in a legal format

@@ -73,6 +73,7 @@ func TestProofMessageSignVerifyPublicKey(t *testing.T) {
    Proof:     []byte("testProof"),
    Instances: []byte("testInstance"),
    Vk:        []byte("testVk"),
    ChunkInfo: nil,
    },
    Error: "testError",
},
@@ -101,12 +102,13 @@ func TestProofDetailHash(t *testing.T) {
    Proof:     []byte("testProof"),
    Instances: []byte("testInstance"),
    Vk:        []byte("testVk"),
    ChunkInfo: nil,
    },
    Error: "testError",
}
hash, err := proofDetail.Hash()
assert.NoError(t, err)
expectedHash := "4165f5ab3399001002a5b8e4062914249a2deb72f6133d647b586f53e236802d"
expectedHash := "d3b57cb84b0da8043373eeb3612806fb7248d6d1b6e089846ccf3ccce2d9f31c"
assert.Equal(t, expectedHash, hex.EncodeToString(hash))
}

@@ -136,6 +138,7 @@ func TestProofMsgPublicKey(t *testing.T) {
    Proof:     []byte("testProof"),
    Instances: []byte("testInstance"),
    Vk:        []byte("testVk"),
    ChunkInfo: nil,
    },
    Error: "testError",
},
27 common/utils/http.go Normal file
@@ -0,0 +1,27 @@
package utils

import (
    "net/http"
    "time"
)

// StartHTTPServer a public http server to be used.
func StartHTTPServer(address string, handler http.Handler) (*http.Server, error) {
    srv := &http.Server{
        Handler:      handler,
        Addr:         address,
        ReadTimeout:  time.Second * 3,
        WriteTimeout: time.Second * 3,
        IdleTimeout:  time.Second * 12,
    }
    errCh := make(chan error, 1)
    go func() {
        errCh <- srv.ListenAndServe()
    }()
    select {
    case err := <-errCh:
        return nil, err
    case <-time.After(time.Second):
    }
    return srv, nil
}
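A short sketch of using this helper, for example to publish a Prometheus handler; the address is a placeholder chosen for the example:

package main

import (
    "log"

    "github.com/prometheus/client_golang/prometheus/promhttp"

    "scroll-tech/common/utils"
)

func main() {
    // Publish the default Prometheus handler on an assumed local address.
    // StartHTTPServer returns once the listener has survived for about a
    // second, or returns the startup error (e.g. port already in use).
    srv, err := utils.StartHTTPServer("localhost:9091", promhttp.Handler())
    if err != nil {
        log.Fatal("metrics endpoint failed to start: ", err)
    }
    log.Println("serving metrics on", srv.Addr)
    select {} // block forever; a real service would tie this to its own shutdown
}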
@@ -20,12 +20,17 @@ var (
    MessageRelayerApp MockAppName = "message-relayer-test"
    // RollupRelayerApp the name of mock rollup-relayer app.
    RollupRelayerApp MockAppName = "rollup-relayer-test"
    // CoordinatorApp the name of mock coordinator app.
    CoordinatorApp MockAppName = "coordinator-test"

    // DBCliApp the name of mock database app.
    DBCliApp MockAppName = "db_cli-test"
    // ProverApp the name of mock prover app.
    ProverApp MockAppName = "prover-test"

    // CoordinatorApp the name of mock coordinator app.
    CoordinatorApp MockAppName = "coordinator-test"

    // ChunkProverApp the name of mock chunk prover app.
    ChunkProverApp MockAppName = "chunkProver-test"
    // BatchProverApp the name of mock batch prover app.
    BatchProverApp MockAppName = "batchProver-test"
)

// RegisterSimulation register initializer function for integration-test.
Some files were not shown because too many files have changed in this diff.