Compare commits


39 Commits

Author SHA1 Message Date
colinlyguo
106f409a02 tweak and bump version 2023-08-05 16:00:33 +08:00
colinlyguo
b42a17580b go mod tidy and bump version 2023-08-05 03:41:04 +08:00
colinlyguo
47bf98eb30 address comments 2023-08-05 03:32:29 +08:00
colinlyguo
e3a49fc041 happy path integration test 2023-08-04 22:31:56 +08:00
colinlyguo
72355067b8 fix get task and proof 2023-08-04 20:56:52 +08:00
georgehao
fcf34edbfb feat(coordinator): add prover rest api (#716)
Co-authored-by: colinlyguo <colinlyguo@scroll.io>
2023-08-04 20:48:17 +08:00
georgehao
d11de12637 Merge branch 'develop' of github.com:scroll-tech/scroll into pull-model-prover-client 2023-08-04 17:01:31 +08:00
colin
3be3f9926a Merge branch 'develop' into pull-model-prover-client 2023-08-04 15:22:39 +08:00
colin
ba51b5794d Merge branch 'develop' into pull-model-prover-client 2023-08-04 14:37:08 +08:00
colinlyguo
f22efbff22 tweak integration-test 2023-08-03 20:02:26 +08:00
colin
8f92ca9010 Merge branch 'develop' into pull-model-prover-client 2023-08-03 19:33:16 +08:00
colinlyguo
2c69352948 fix 2023-08-03 19:17:22 +08:00
colinlyguo
0652852c16 address comments 2023-08-03 19:01:46 +08:00
colinlyguo
bbb4ca6d76 revert flag change 2023-08-03 17:56:26 +08:00
colinlyguo
1b542830af add mock_proof_submission config 2023-08-03 17:38:25 +08:00
colinlyguo
975e61afe4 tweak 2023-08-03 17:12:45 +08:00
colinlyguo
7bf538f287 refactor error code 2023-08-03 17:08:26 +08:00
colinlyguo
58fc8a3add remove signature 2023-08-03 16:30:27 +08:00
colinlyguo
bf9dabd9b7 tweak login request 2023-08-03 15:54:50 +08:00
colinlyguo
beea5754cb add retry mechanism for JWT expiration 2023-08-03 15:46:14 +08:00
colin
d0c82f78ad Merge branch 'develop' into pull-model-prover-client 2023-08-03 15:03:02 +08:00
colinlyguo
3913a7397c run goimport 2023-08-03 15:01:51 +08:00
colinlyguo
c63e25a9bf add login auth 2023-08-03 14:57:24 +08:00
colinlyguo
8960849677 address comments 2023-08-03 13:39:31 +08:00
colinlyguo
24d8658ddb address comments 2023-08-03 13:11:12 +08:00
colinlyguo
f88f883b6a simplify http body result marshal 2023-08-03 11:46:57 +08:00
colinlyguo
c8fd76dd02 trigger ci 2023-08-02 16:48:04 +08:00
colin
b82bbedaca Merge branch 'develop' into pull-model-prover-client 2023-08-02 16:36:14 +08:00
colinlyguo
1ea0dcd0ae bump version 2023-08-02 16:35:42 +08:00
colinlyguo
e87bdbabe2 tweak code 2023-08-02 16:32:41 +08:00
colinlyguo
4db1565ba3 comment out prover start process test (coordinator not support login yet) 2023-08-02 03:13:36 +08:00
colinlyguo
2425374362 fix 2023-08-02 03:05:06 +08:00
colinlyguo
89b2e3133a scrollfmt 2023-08-02 02:44:42 +08:00
colinlyguo
5b7a2bb40c bump version 2023-08-02 02:36:18 +08:00
colinlyguo
0466e2d201 add retry configs 2023-08-02 02:32:05 +08:00
colinlyguo
f86cbb2bcb fix 2023-08-02 02:14:30 +08:00
colin
712c2c5ad3 Merge branch 'develop' into pull-model-prover-client 2023-08-02 01:54:47 +08:00
colinlyguo
a8a5db9a18 fix 2023-08-01 22:27:45 +08:00
colinlyguo
61d8b48b73 a commit 2023-08-01 21:05:13 +08:00
210 changed files with 2155 additions and 5986 deletions

View File

@@ -1,37 +0,0 @@
import { URL } from "url";
import { readFileSync, writeFileSync } from "fs";
const versionFilePath = new URL(
"../../common/version/version.go",
import.meta.url
).pathname;
const versionFileContent = readFileSync(versionFilePath, { encoding: "utf-8" });
const currentVersion = versionFileContent.match(
/var tag = "(?<version>v(?<major>\d+)\.(?<minor>\d+)\.(?<patch>\d+))"/
);
try {
parseInt(currentVersion.groups.major);
parseInt(currentVersion.groups.minor);
parseInt(currentVersion.groups.patch);
} catch (err) {
console.error(new Error("Failed to parse version in version.go file"));
throw err;
}
// prettier-ignore
const newVersion = `v${currentVersion.groups.major}.${currentVersion.groups.minor}.${parseInt(currentVersion.groups.patch) + 1}`;
console.log(
`Bump version from ${currentVersion.groups.version} to ${newVersion}`
);
writeFileSync(
versionFilePath,
versionFileContent.replace(
`var tag = "${currentVersion.groups.version}"`,
`var tag = "${newVersion}"`
)
);
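The deleted `.github/scripts/bump_version_dot_go.mjs` above parses the `var tag = "vX.Y.Z"` line in `common/version/version.go` and rewrites the file with the patch number incremented. For reference, a minimal sketch of the same patch-bump logic in Go — not part of the repository; the file path and function name here are illustrative only:

```go
package main

import (
	"fmt"
	"os"
	"regexp"
	"strconv"
)

// bumpPatch mirrors the deleted bump_version_dot_go.mjs: it finds the
// `var tag = "vX.Y.Z"` line and rewrites it with the patch number + 1.
func bumpPatch(path string) error {
	content, err := os.ReadFile(path)
	if err != nil {
		return err
	}
	re := regexp.MustCompile(`var tag = "v(\d+)\.(\d+)\.(\d+)"`)
	m := re.FindSubmatch(content)
	if m == nil {
		return fmt.Errorf("failed to parse version in %s", path)
	}
	patch, err := strconv.Atoi(string(m[3]))
	if err != nil {
		return err
	}
	newTag := fmt.Sprintf(`var tag = "v%s.%s.%d"`, m[1], m[2], patch+1)
	fmt.Printf("Bump version from v%s.%s.%s to v%s.%s.%d\n",
		m[1], m[2], m[3], m[1], m[2], patch+1)
	return os.WriteFile(path, re.ReplaceAll(content, []byte(newTag)), 0o644)
}

func main() {
	if err := bumpPatch("common/version/version.go"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```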

View File

@@ -1,61 +0,0 @@
name: Bump Version
on:
pull_request:
branches: [develop]
types:
- opened
- reopened
- synchronize
- ready_for_review
jobs:
try-to-bump:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v3
with:
ref: ${{ github.head_ref }}
- name: check diff
id: check_diff
run: |
set -euo pipefail
# fetch develop branch so that we can diff against later
git fetch origin develop
echo 'checking verion changes in diff...'
# check if version changed in version.go
# note: the grep will fail if use \d instead of [0-9]
git diff HEAD..origin/develop --text --no-ext-diff --unified=0 --no-prefix common/version/version.go | grep -E '^\+var tag = "v[0-9]+\.[0-9]+\.[0-9]+"$' && true
exit_code=$?
# auto bump if version is not bumped manually
echo '> require auto version bump?'
if [ $exit_code -eq 0 ]; then
echo '> no, already bumped'
echo "result=no-bump" >> "$GITHUB_OUTPUT"
else
echo '> yes'
echo "result=bump" >> "$GITHUB_OUTPUT"
fi
- name: Install Node.js 16
if: steps.check_diff.outputs.result == 'bump'
uses: actions/setup-node@v3
with:
node-version: 16
- name: bump version in common/version/version.go
if: steps.check_diff.outputs.result == 'bump'
run: node .github/scripts/bump_version_dot_go.mjs
# Commits made by this Action do not trigger new Workflow runs
- uses: stefanzweifel/git-auto-commit-action@3ea6ae190baf489ba007f7c92608f33ce20ef04a
if: steps.check_diff.outputs.result == 'bump'
with:
skip_fetch: true # already did fetch in check diff
file_pattern: "common/version/version.go"
commit_message: "chore: auto version bump[bot]"

View File

@@ -6,7 +6,7 @@ on:
- v**
jobs:
event_watcher:
build-and-push:
runs-on: ubuntu-latest
steps:
- name: Checkout code
@@ -27,18 +27,6 @@ jobs:
tags: scrolltech/event-watcher:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
gas_oracle:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push gas_oracle docker
uses: docker/build-push-action@v2
with:
@@ -48,18 +36,6 @@ jobs:
tags: scrolltech/gas-oracle:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
msg_relayer:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push msg_relayer docker
uses: docker/build-push-action@v2
with:
@@ -69,18 +45,6 @@ jobs:
tags: scrolltech/msg-relayer:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
rollup_relayer:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push rollup_relayer docker
uses: docker/build-push-action@v2
with:
@@ -90,18 +54,6 @@ jobs:
tags: scrolltech/rollup-relayer:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
bridgehistoryapi-cross-msg-fetcher:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push bridgehistoryapi-cross-msg-fetcher docker
uses: docker/build-push-action@v2
with:
@@ -111,18 +63,6 @@ jobs:
tags: scrolltech/bridgehistoryapi-cross-msg-fetcher:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
bridgehistoryapi-server:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push bridgehistoryapi-server docker
uses: docker/build-push-action@v2
with:
@@ -132,18 +72,6 @@ jobs:
tags: scrolltech/bridgehistoryapi-server:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
coordinator:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push coordinator docker
uses: docker/build-push-action@v2
with:
@@ -153,24 +81,3 @@ jobs:
tags: scrolltech/coordinator:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}
prover-stats-api:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v2
- name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2
- name: Login to Docker Hub
uses: docker/login-action@v2
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}
- name: Build and push prover-stats-api docker
uses: docker/build-push-action@v2
with:
context: .
file: ./build/dockerfiles/prover-stats-api.Dockerfile
push: true
tags: scrolltech/prover-stats-api:${{github.ref_name}}
# cache-from: type=gha,scope=${{ github.workflow }}
# cache-to: type=gha,scope=${{ github.workflow }}

View File

@@ -1,7 +1,5 @@
.PHONY: check update dev_docker build_test_docker run_test_docker clean
L2GETH_TAG=scroll-v4.3.34
help: ## Display this help message
@grep -h \
-E '^[a-zA-Z_-]+:.*?## .*$$' $(MAKEFILE_LIST) | \
@@ -17,14 +15,14 @@ lint: ## The code's format and security checks.
update: ## update dependencies
go work sync
cd $(PWD)/bridge/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/bridge/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.18 && go mod tidy
cd $(PWD)/bridge-history-api/ && go get -u github.com/ethereum/go-ethereum@latest && go mod tidy
cd $(PWD)/common/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/coordinator/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/database/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/prover/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/prover-stats-api/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/tests/integration-test/ && go get -u github.com/scroll-tech/go-ethereum@${L2GETH_TAG} && go mod tidy
cd $(PWD)/common/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.18 && go mod tidy
cd $(PWD)/coordinator/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.18 && go mod tidy
cd $(PWD)/database/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.18 && go mod tidy
cd $(PWD)/prover/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.18 && go mod tidy
cd $(PWD)/prover-stats-api/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.18 && go mod tidy
cd $(PWD)/tests/integration-test/ && go get -u github.com/scroll-tech/go-ethereum@scroll-v4.3.18 && go mod tidy
goimports -local $(PWD)/bridge/ -w .
goimports -local $(PWD)/bridge-history-api/ -w .
goimports -local $(PWD)/common/ -w .

File diff suppressed because one or more lines are too long

View File

@@ -95,9 +95,6 @@ func (b *BatchInfoFetcher) fetchBatchInfo() error {
} else {
startHeight = latestBatchHeight + 1
}
if startHeight < b.batchInfoStartNumber {
startHeight = b.batchInfoStartNumber
}
for from := startHeight; number >= from; from += fetchLimit {
to := from + fetchLimit - 1
// number - confirmation can never less than 0 since the for loop condition

View File

@@ -53,11 +53,11 @@ func (m *MsgProofUpdater) Start() {
continue
}
latestBatchIndexWithProof, err := m.l2SentMsgOrm.GetLatestL2SentMsgBatchIndex(m.ctx)
log.Info("latest batc with proof", "batch_index", latestBatchIndexWithProof)
if err != nil {
log.Error("MsgProofUpdater: Can not get latest L2SentMsgBatchIndex: ", "err", err)
continue
}
log.Info("latest batch with proof", "batch_index", latestBatchIndexWithProof)
var start uint64
if latestBatchIndexWithProof < 0 {
start = 1

View File

@@ -3,7 +3,7 @@ module bridge-history-api
go 1.19
require (
github.com/ethereum/go-ethereum v1.12.2
github.com/ethereum/go-ethereum v1.12.0
github.com/gin-contrib/cors v1.4.0
github.com/gin-gonic/gin v1.9.1
github.com/mattn/go-colorable v0.1.13
@@ -33,13 +33,13 @@ require (
github.com/consensys/bavard v0.1.13 // indirect
github.com/consensys/gnark-crypto v0.10.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/crate-crypto/go-kzg-4844 v0.3.0 // indirect
github.com/crate-crypto/go-kzg-4844 v0.2.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set/v2 v2.1.0 // indirect
github.com/decred/dcrd/dcrec/secp256k1/v4 v4.2.0 // indirect
github.com/deepmap/oapi-codegen v1.8.2 // indirect
github.com/docker/docker v23.0.6+incompatible // indirect
github.com/ethereum/c-kzg-4844 v0.3.1 // indirect
github.com/ethereum/c-kzg-4844 v0.2.0 // indirect
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.2 // indirect
@@ -61,7 +61,6 @@ require (
github.com/gorilla/websocket v1.5.0 // indirect
github.com/graph-gophers/graphql-go v1.3.0 // indirect
github.com/hashicorp/go-bexpr v0.1.10 // indirect
github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7 // indirect
github.com/holiman/bloomfilter/v2 v2.0.3 // indirect
github.com/holiman/uint256 v1.2.3 // indirect
github.com/huin/goupnp v1.0.3 // indirect
@@ -104,7 +103,7 @@ require (
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/status-im/keycard-go v0.2.0 // indirect
github.com/supranational/blst v0.3.11 // indirect
github.com/supranational/blst v0.3.11-0.20230406105308-e9dfc5ee724b // indirect
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
github.com/tklauser/go-sysconf v0.3.11 // indirect
github.com/tklauser/numcpus v0.6.0 // indirect
@@ -114,12 +113,12 @@ require (
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/arch v0.4.0 // indirect
golang.org/x/crypto v0.12.0 // indirect
golang.org/x/exp v0.0.0-20230810033253-352e893a4cad // indirect
golang.org/x/crypto v0.11.0 // indirect
golang.org/x/exp v0.0.0-20230206171751-46f607a40771 // indirect
golang.org/x/net v0.12.0 // indirect
golang.org/x/sync v0.3.0 // indirect
golang.org/x/sys v0.11.0 // indirect
golang.org/x/text v0.12.0 // indirect
golang.org/x/sys v0.10.0 // indirect
golang.org/x/text v0.11.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.11.0 // indirect
google.golang.org/protobuf v1.31.0 // indirect

View File

@@ -66,8 +66,8 @@ github.com/coreos/go-semver v0.2.0/go.mod h1:nnelYz7RCh+5ahJtPPxZlU+153eP4D4r3Ee
github.com/cpuguy83/go-md2man v1.0.10/go.mod h1:SmD6nW6nTyfqj6ABTjUi3V3JVMnlJmwcJI5acqYI6dE=
github.com/cpuguy83/go-md2man/v2 v2.0.2 h1:p1EgwI/C7NhT0JmVkwCD2ZBK8j4aeHQX2pMHHBfMQ6w=
github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/crate-crypto/go-kzg-4844 v0.3.0 h1:UBlWE0CgyFqqzTI+IFyCzA7A3Zw4iip6uzRv5NIXG0A=
github.com/crate-crypto/go-kzg-4844 v0.3.0/go.mod h1:SBP7ikXEgDnUPONgm33HtuDZEDtWa3L4QtN1ocJSEQ4=
github.com/crate-crypto/go-kzg-4844 v0.2.0 h1:UVuHOE+5tIWrim4zf/Xaa43+MIsDCPyW76QhUpiMGj4=
github.com/crate-crypto/go-kzg-4844 v0.2.0/go.mod h1:SBP7ikXEgDnUPONgm33HtuDZEDtWa3L4QtN1ocJSEQ4=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/cyberdelia/templates v0.0.0-20141128023046-ca7fffd4298c/go.mod h1:GyV+0YP4qX0UQ7r2MoYZ+AvYDp12OF5yg4q8rGnyNh4=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -94,10 +94,10 @@ github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.m
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/etcd-io/bbolt v1.3.3/go.mod h1:ZF2nL25h33cCyBtcyWeZ2/I3HQOfTP+0PIEvHjkjCrw=
github.com/ethereum/c-kzg-4844 v0.3.1 h1:sR65+68+WdnMKxseNWxSJuAv2tsUrihTpVBTfM/U5Zg=
github.com/ethereum/c-kzg-4844 v0.3.1/go.mod h1:VewdlzQmpT5QSrVhbBuGoCdFJkpaJlO1aQputP83wc0=
github.com/ethereum/go-ethereum v1.12.2 h1:eGHJ4ij7oyVqUQn48LBz3B7pvQ8sV0wGJiIE6gDq/6Y=
github.com/ethereum/go-ethereum v1.12.2/go.mod h1:1cRAEV+rp/xX0zraSCBnu9Py3HQ+geRMj3HdR+k0wfI=
github.com/ethereum/c-kzg-4844 v0.2.0 h1:+cUvymlnoDDQgMInp25Bo3OmLajmmY8mLJ/tLjqd77Q=
github.com/ethereum/c-kzg-4844 v0.2.0/go.mod h1:WI2Nd82DMZAAZI1wV2neKGost9EKjvbpQR9OqE5Qqa8=
github.com/ethereum/go-ethereum v1.12.0 h1:bdnhLPtqETd4m3mS8BGMNvBTf36bO5bx/hxE2zljOa0=
github.com/ethereum/go-ethereum v1.12.0/go.mod h1:/oo2X/dZLJjf2mJ6YT9wcWxa4nNJDBKDBU6sFIpx1Gs=
github.com/fasthttp-contrib/websocket v0.0.0-20160511215533-1f3b11f56072/go.mod h1:duJ4Jxv5lDcvg4QuQr0oowTf7dz4/CR8NtyCooz9HL8=
github.com/fatih/structs v1.1.0/go.mod h1:9NiDSp5zOcgEDl+j00MP/WkGVPOlPRLejGD8Ga6PJ7M=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
@@ -212,8 +212,6 @@ github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpx
github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0=
github.com/hashicorp/go-version v1.2.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09ZGVZPK5anwXA=
github.com/hashicorp/hcl v1.0.0/go.mod h1:E5yfLk+7swimpb2L/Alb/PJmXilQ/rhwaUYs4T20WEQ=
github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7 h1:3JQNjnMRil1yD0IfZKHF9GxxWKDJGj8I0IqOUol//sw=
github.com/holiman/billy v0.0.0-20230718173358-1c7e68d277a7/go.mod h1:5GuXa7vkL8u9FkFuWdVvfR5ix8hRB7DbOAaYULamFpc=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
github.com/holiman/uint256 v1.2.3 h1:K8UWO1HUJpRMXBxbmaY1Y8IAMZC/RsKB+ArEnnK4l5o=
@@ -433,8 +431,8 @@ github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY=
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/supranational/blst v0.3.11 h1:LyU6FolezeWAhvQk0k6O/d49jqgO52MSDDfYgbeoEm4=
github.com/supranational/blst v0.3.11/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/supranational/blst v0.3.11-0.20230406105308-e9dfc5ee724b h1:u49mjRnygnB34h8OKbnNJFVUtWSKIKb1KukdV8bILUM=
github.com/supranational/blst v0.3.11-0.20230406105308-e9dfc5ee724b/go.mod h1:jZJtfjgudtNl4en1tzwPIV3KjUnQUvG3/j+w+fVonLw=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+KdJV0CM=
@@ -493,11 +491,11 @@ golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk=
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA=
golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20230810033253-352e893a4cad h1:g0bG7Z4uG+OgH2QDODnjp6ggkk1bJDsINcuWmJN1iJU=
golang.org/x/exp v0.0.0-20230810033253-352e893a4cad/go.mod h1:FXUEEKJgO7OQYeo8N01OfiKP8RXMtf6e8aTskBGqWdc=
golang.org/x/exp v0.0.0-20230206171751-46f607a40771 h1:xP7rWLUr1e1n2xkK5YB4LI0hPEy3LJC6Wk+D4pGlOJg=
golang.org/x/exp v0.0.0-20230206171751-46f607a40771/go.mod h1:CxIveKay+FTh1D0yPZemJVgC/95VzuuOLq5Qi4xnoYc=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
@@ -587,8 +585,8 @@ golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -600,8 +598,8 @@ golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc=
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4=
golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=

View File

@@ -15,7 +15,6 @@ func Route(router *gin.Engine, conf *config.Config) {
router.Use(cors.New(cors.Config{
AllowOrigins: []string{"*"},
AllowMethods: []string{"GET", "POST", "PUT", "DELETE"},
AllowHeaders: []string{"Origin", "Content-Type", "Authorization"},
AllowCredentials: true,
MaxAge: 12 * time.Hour,
}))

View File

@@ -204,7 +204,7 @@ func (c *CrossMsg) DeleteL1CrossMsgAfterHeight(ctx context.Context, height uint6
// GetL2CrossMsgByHash returns layer2 cross message by given hash
func (c *CrossMsg) GetL2CrossMsgByHash(ctx context.Context, l2Hash common.Hash) (*CrossMsg, error) {
var result CrossMsg
err := c.db.WithContext(ctx).Model(&CrossMsg{}).Where("layer2_hash = ? AND msg_type = ?", l2Hash.String(), Layer2Msg).First(&result).Error
err := c.db.WithContext(ctx).Model(&CrossMsg{}).Where("layer2_hash = ? AND msg_type = ?", l2Hash.String(), Layer1Msg).First(&result).Error
if err != nil {
if errors.Is(err, gorm.ErrRecordNotFound) {
return nil, nil

View File

@@ -2,7 +2,6 @@ package orm
import (
"context"
"errors"
"fmt"
"time"
@@ -103,9 +102,6 @@ func (l *L2SentMsg) GetLatestL2SentMsgBatchIndex(ctx context.Context) (int64, er
Select("batch_index").
First(&result).
Error
if err != nil && errors.Is(err, gorm.ErrRecordNotFound) {
return -1, nil
}
if err != nil {
return -1, fmt.Errorf("L2SentMsg.GetLatestL2SentMsgBatchIndex error: %w", err)
}

View File

@@ -1,6 +1,7 @@
package utils
import (
"bytes"
"context"
"encoding/binary"
"errors"
@@ -75,14 +76,6 @@ func GetBatchRangeFromCalldataV2(calldata []byte) (uint64, uint64, uint64, error
method := backendabi.ScrollChainV2ABI.Methods["commitBatch"]
values, err := method.Inputs.Unpack(calldata[4:])
if err != nil {
// special case: import genesis batch
method = backendabi.ScrollChainV2ABI.Methods["importGenesisBatch"]
_, err2 := method.Inputs.Unpack(calldata[4:])
if err2 == nil {
// genesis batch
return 0, 0, 0, nil
}
// none of "commitBatch" and "importGenesisBatch" match, give up
return 0, 0, 0, err
}
args := commitBatchArgs{}
@@ -117,3 +110,48 @@ func GetBatchRangeFromCalldataV2(calldata []byte) (uint64, uint64, uint64, error
return batchIndex, startBlock, finishBlock, err
}
// GetBatchRangeFromCalldataV1 find the block range from calldata, both inclusive.
func GetBatchRangeFromCalldataV1(calldata []byte) ([]uint64, []uint64, []uint64, error) {
var batchIndices []uint64
var startBlocks []uint64
var finishBlocks []uint64
if bytes.Equal(calldata[0:4], common.Hex2Bytes("cb905499")) {
// commitBatches
method := backendabi.ScrollChainABI.Methods["commitBatches"]
values, err := method.Inputs.Unpack(calldata[4:])
if err != nil {
return batchIndices, startBlocks, finishBlocks, err
}
args := make([]backendabi.IScrollChainBatch, len(values))
err = method.Inputs.Copy(&args, values)
if err != nil {
return batchIndices, startBlocks, finishBlocks, err
}
for i := 0; i < len(args); i++ {
batchIndices = append(batchIndices, args[i].BatchIndex)
startBlocks = append(startBlocks, args[i].Blocks[0].BlockNumber)
finishBlocks = append(finishBlocks, args[i].Blocks[len(args[i].Blocks)-1].BlockNumber)
}
} else if bytes.Equal(calldata[0:4], common.Hex2Bytes("8c73235d")) {
// commitBatch
method := backendabi.ScrollChainABI.Methods["commitBatch"]
values, err := method.Inputs.Unpack(calldata[4:])
if err != nil {
return batchIndices, startBlocks, finishBlocks, err
}
args := backendabi.IScrollChainBatch{}
err = method.Inputs.Copy(&args, values)
if err != nil {
return batchIndices, startBlocks, finishBlocks, err
}
batchIndices = append(batchIndices, args.BatchIndex)
startBlocks = append(startBlocks, args.Blocks[0].BlockNumber)
finishBlocks = append(finishBlocks, args.Blocks[len(args.Blocks)-1].BlockNumber)
} else {
return batchIndices, startBlocks, finishBlocks, errors.New("invalid selector")
}
return batchIndices, startBlocks, finishBlocks, nil
}
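The new `GetBatchRangeFromCalldataV1` above dispatches on the transaction's 4-byte function selector before unpacking the ABI arguments, returning one batch range per committed batch. A stripped-down, self-contained sketch of just that selector check, with the selector constants copied from the diff (the real function additionally needs the `backendabi` bindings to decode the batch structs):

```go
package main

import (
	"bytes"
	"encoding/hex"
	"errors"
	"fmt"
)

// dispatchCommitSelector mimics the selector check in GetBatchRangeFromCalldataV1:
// "cb905499" -> commitBatches, "8c73235d" -> commitBatch, anything else is rejected.
func dispatchCommitSelector(calldata []byte) (string, error) {
	if len(calldata) < 4 {
		return "", errors.New("calldata shorter than a 4-byte selector")
	}
	commitBatches, _ := hex.DecodeString("cb905499")
	commitBatch, _ := hex.DecodeString("8c73235d")
	switch {
	case bytes.Equal(calldata[:4], commitBatches):
		return "commitBatches", nil
	case bytes.Equal(calldata[:4], commitBatch):
		return "commitBatch", nil
	default:
		return "", errors.New("invalid selector")
	}
}

func main() {
	calldata, _ := hex.DecodeString("cb905499deadbeef")
	method, err := dispatchCommitSelector(calldata)
	fmt.Println(method, err) // commitBatches <nil>
}
```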

View File

@@ -1,6 +1,7 @@
package utils_test
import (
"os"
"testing"
"github.com/ethereum/go-ethereum/common"
@@ -33,11 +34,31 @@ func TestGetBatchRangeFromCalldataV2(t *testing.T) {
assert.Equal(t, start, uint64(10))
assert.Equal(t, finish, uint64(20))
assert.Equal(t, batchIndex, uint64(2))
// genesis batch
batchIndex, start, finish, err = utils.GetBatchRangeFromCalldataV2(common.Hex2Bytes("3fdeecb200000000000000000000000000000000000000000000000000000000000000402dcb5308098d24a37fc1487a229fcedb09fa4343ede39cbad365bc925535bb09000000000000000000000000000000000000000000000000000000000000005900000000000000000000000000000000000000000000000000c252bc9780c4d83cf11f14b8cd03c92c4d18ce07710ba836d31d12da216c8330000000000000000000000000000000000000000000000000000000000000000000000000000000"))
assert.NoError(t, err)
assert.Equal(t, start, uint64(0))
assert.Equal(t, finish, uint64(0))
assert.Equal(t, batchIndex, uint64(0))
}
func TestGetBatchRangeFromCalldataV1(t *testing.T) {
calldata, err := os.ReadFile("../testdata/commit-batches-0x3095e91db7ba4a6fbf4654d607db322e58ff5579c502219c8024acaea74cf311.txt")
assert.NoError(t, err)
// multiple batches
batchIndices, startBlocks, finishBlocks, err := utils.GetBatchRangeFromCalldataV1(common.Hex2Bytes(string(calldata[:])))
assert.NoError(t, err)
assert.Equal(t, len(batchIndices), 5)
assert.Equal(t, len(startBlocks), 5)
assert.Equal(t, len(finishBlocks), 5)
assert.Equal(t, batchIndices[0], uint64(1))
assert.Equal(t, batchIndices[1], uint64(2))
assert.Equal(t, batchIndices[2], uint64(3))
assert.Equal(t, batchIndices[3], uint64(4))
assert.Equal(t, batchIndices[4], uint64(5))
assert.Equal(t, startBlocks[0], uint64(1))
assert.Equal(t, startBlocks[1], uint64(6))
assert.Equal(t, startBlocks[2], uint64(7))
assert.Equal(t, startBlocks[3], uint64(19))
assert.Equal(t, startBlocks[4], uint64(20))
assert.Equal(t, finishBlocks[0], uint64(5))
assert.Equal(t, finishBlocks[1], uint64(6))
assert.Equal(t, finishBlocks[2], uint64(18))
assert.Equal(t, finishBlocks[3], uint64(19))
assert.Equal(t, finishBlocks[4], uint64(20))
}

File diff suppressed because one or more lines are too long

View File

@@ -15,8 +15,8 @@ func TestEventSignature(t *testing.T) {
assert.Equal(L1RelayedMessageEventSignature, common.HexToHash("4641df4a962071e12719d8c8c8e5ac7fc4d97b927346a3d7a335b1f7517e133c"))
assert.Equal(L1FailedRelayedMessageEventSignature, common.HexToHash("99d0e048484baa1b1540b1367cb128acd7ab2946d1ed91ec10e3c85e4bf51b8f"))
assert.Equal(L1CommitBatchEventSignature, common.HexToHash("2c32d4ae151744d0bf0b9464a3e897a1d17ed2f1af71f7c9a75f12ce0d28238f"))
assert.Equal(L1FinalizeBatchEventSignature, common.HexToHash("26ba82f907317eedc97d0cbef23de76a43dd6edb563bdb6e9407645b950a7a2d"))
assert.Equal(L1CommitBatchEventSignature, common.HexToHash("2cdc615c74452778c0fb6184735e014c13aad2b62774fe0b09bd1dcc2cc14a62"))
assert.Equal(L1FinalizeBatchEventSignature, common.HexToHash("9d3058a3cb9739a2527f22dd9a4138065844037d3004254952e2458d808cc364"))
assert.Equal(L1QueueTransactionEventSignature, common.HexToHash("69cfcb8e6d4192b8aba9902243912587f37e550d75c1fa801491fce26717f37e"))
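The expected topic hashes in this test change because the bridge event signatures changed; each constant is the Keccak-256 hash of an event's canonical signature string. A self-contained sketch of how such a topic hash is derived, using the well-known ERC-20 `Transfer` event as a stand-in rather than reproducing the Scroll event signatures:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// An event's log topic[0] is the Keccak-256 hash of its canonical signature.
	sig := "Transfer(address,address,uint256)"
	topic := crypto.Keccak256Hash([]byte(sig))
	// Prints 0xddf252ad1be2c89b69c2b068fc378daa952ba7f163c4a11628f55a4df523b3ef
	fmt.Println(topic.Hex())
}
```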

View File

@@ -7,7 +7,6 @@ import (
"os/signal"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
@@ -60,8 +59,8 @@ func action(ctx *cli.Context) error {
}
}()
registry := prometheus.DefaultRegisterer
metrics.Server(ctx, registry.(*prometheus.Registry))
// Start metrics server.
metrics.Serve(subCtx, ctx)
l1client, err := ethclient.Dial(cfg.L1Config.Endpoint)
if err != nil {
log.Error("failed to connect l1 geth", "config file", cfgFile, "error", err)
@@ -73,12 +72,8 @@ func action(ctx *cli.Context) error {
log.Error("failed to connect l2 geth", "config file", cfgFile, "error", err)
return err
}
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations,
cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db, registry)
l2watcher := watcher.NewL2WatcherClient(ctx.Context, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress,
cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db, registry)
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations, cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db)
l2watcher := watcher.NewL2WatcherClient(ctx.Context, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress, cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db)
go utils.Loop(subCtx, 10*time.Second, func() {
if loopErr := l1watcher.FetchContractEvent(); loopErr != nil {

View File

@@ -7,7 +7,6 @@ import (
"os/signal"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
@@ -62,8 +61,8 @@ func action(ctx *cli.Context) error {
}
}()
registry := prometheus.DefaultRegisterer
metrics.Server(ctx, registry.(*prometheus.Registry))
// Start metrics server.
metrics.Serve(subCtx, ctx)
l1client, err := ethclient.Dial(cfg.L1Config.Endpoint)
if err != nil {
@@ -79,14 +78,14 @@ func action(ctx *cli.Context) error {
}
l1watcher := watcher.NewL1WatcherClient(ctx.Context, l1client, cfg.L1Config.StartHeight, cfg.L1Config.Confirmations,
cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db, registry)
cfg.L1Config.L1MessengerAddress, cfg.L1Config.L1MessageQueueAddress, cfg.L1Config.ScrollChainContractAddress, db)
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig, registry)
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig)
if err != nil {
log.Error("failed to create new l1 relayer", "config file", cfgFile, "error", err)
return err
}
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, false /* initGenesis */, registry)
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, false /* initGenesis */)
if err != nil {
log.Error("failed to create new l2 relayer", "config file", cfgFile, "error", err)
return err

View File

@@ -7,7 +7,6 @@ import (
"os/signal"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
@@ -60,10 +59,10 @@ func action(ctx *cli.Context) error {
}
}()
registry := prometheus.DefaultRegisterer
metrics.Server(ctx, registry.(*prometheus.Registry))
// Start metrics server.
metrics.Serve(subCtx, ctx)
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig, registry)
l1relayer, err := relayer.NewLayer1Relayer(ctx.Context, db, cfg.L1Config.RelayerConfig)
if err != nil {
log.Error("failed to create new l1 relayer", "config file", cfgFile, "error", err)
return err

View File

@@ -7,7 +7,6 @@ import (
"os/signal"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
@@ -63,8 +62,8 @@ func action(ctx *cli.Context) error {
}
}()
registry := prometheus.DefaultRegisterer
metrics.Server(ctx, registry.(*prometheus.Registry))
// Start metrics server.
metrics.Serve(subCtx, ctx)
// Init l2geth connection
l2client, err := ethclient.Dial(cfg.L2Config.Endpoint)
@@ -74,26 +73,26 @@ func action(ctx *cli.Context) error {
}
initGenesis := ctx.Bool(utils.ImportGenesisFlag.Name)
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, initGenesis, registry)
l2relayer, err := relayer.NewLayer2Relayer(ctx.Context, l2client, db, cfg.L2Config.RelayerConfig, initGenesis)
if err != nil {
log.Error("failed to create l2 relayer", "config file", cfgFile, "error", err)
return err
}
chunkProposer := watcher.NewChunkProposer(subCtx, cfg.L2Config.ChunkProposerConfig, db, registry)
chunkProposer := watcher.NewChunkProposer(subCtx, cfg.L2Config.ChunkProposerConfig, db)
if err != nil {
log.Error("failed to create chunkProposer", "config file", cfgFile, "error", err)
return err
}
batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, db, registry)
batchProposer := watcher.NewBatchProposer(subCtx, cfg.L2Config.BatchProposerConfig, db)
if err != nil {
log.Error("failed to create batchProposer", "config file", cfgFile, "error", err)
return err
}
l2watcher := watcher.NewL2WatcherClient(subCtx, l2client, cfg.L2Config.Confirmations, cfg.L2Config.L2MessengerAddress,
cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db, registry)
cfg.L2Config.L2MessageQueueAddress, cfg.L2Config.WithdrawTrieRootSlot, db)
// Watcher loop to fetch missing blocks
go utils.LoopWithContext(subCtx, 2*time.Second, func(ctx context.Context) {
@@ -107,11 +106,11 @@ func action(ctx *cli.Context) error {
go utils.Loop(subCtx, 2*time.Second, chunkProposer.TryProposeChunk)
go utils.Loop(subCtx, 10*time.Second, batchProposer.TryProposeBatch)
go utils.Loop(subCtx, 2*time.Second, batchProposer.TryProposeBatch)
go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessPendingBatches)
go utils.Loop(subCtx, 15*time.Second, l2relayer.ProcessCommittedBatches)
go utils.Loop(subCtx, 2*time.Second, l2relayer.ProcessCommittedBatches)
// Finish start all rollup relayer functions.
log.Info("Start rollup-relayer successfully")

View File

@@ -27,8 +27,12 @@
"gas_price_diff": 50000
},
"finalize_batch_interval_sec": 0,
"message_sender_private_key": "1212121212121212121212121212121212121212121212121212121212121212",
"gas_oracle_sender_private_key": "1313131313131313131313131313131313131313131313131313131313131313"
"message_sender_private_keys": [
"1212121212121212121212121212121212121212121212121212121212121212"
],
"gas_oracle_sender_private_keys": [
"1313131313131313131313131313131313131313131313131313131313131313"
]
}
},
"l2_config": {
@@ -58,15 +62,22 @@
"gas_price_diff": 50000
},
"finalize_batch_interval_sec": 0,
"message_sender_private_key": "1212121212121212121212121212121212121212121212121212121212121212",
"gas_oracle_sender_private_key": "1313131313131313131313131313131313131313131313131313131313131313",
"commit_sender_private_key": "1414141414141414141414141414141414141414141414141414141414141414",
"finalize_sender_private_key": "1515151515151515151515151515151515151515151515151515151515151515"
"message_sender_private_keys": [
"1212121212121212121212121212121212121212121212121212121212121212"
],
"gas_oracle_sender_private_keys": [
"1313131313131313131313131313131313131313131313131313131313131313"
],
"rollup_sender_private_keys": [
"1414141414141414141414141414141414141414141414141414141414141414"
]
},
"chunk_proposer_config": {
"max_tx_gas_per_chunk": 1123456,
"max_l2_tx_num_per_chunk": 1123,
"max_l1_commit_gas_per_chunk": 11234567,
"max_l1_commit_calldata_size_per_chunk": 112345,
"min_l1_commit_calldata_size_per_chunk": 11234,
"chunk_timeout_sec": 300,
"max_row_consumption_per_chunk": 1048319,
"gas_cost_increase_multiplier": 1.2
@@ -75,6 +86,7 @@
"max_chunk_num_per_batch": 112,
"max_l1_commit_gas_per_batch": 11234567,
"max_l1_commit_calldata_size_per_batch": 112345,
"min_chunk_num_per_batch": 11,
"batch_timeout_sec": 300,
"gas_cost_increase_multiplier": 1.2
}

View File

@@ -4,17 +4,17 @@ go 1.19
require (
github.com/agiledragon/gomonkey/v2 v2.9.0
github.com/orcaman/concurrent-map v1.0.0
github.com/orcaman/concurrent-map/v2 v2.0.1
github.com/prometheus/client_golang v1.14.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20230812030736-25fe3ba69a28
github.com/scroll-tech/go-ethereum v1.10.14-0.20230804022247-26eeb40ea3ca
github.com/smartystreets/goconvey v1.8.0
github.com/stretchr/testify v1.8.3
github.com/urfave/cli/v2 v2.25.7
golang.org/x/sync v0.3.0
gorm.io/gorm v1.25.2
)
require (
github.com/beorn7/perks v1.0.1 // indirect
github.com/btcsuite/btcd v0.20.1-beta // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
@@ -22,7 +22,6 @@ require (
github.com/deckarep/golang-set v1.8.0 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gopherjs/gopherjs v1.17.2 // indirect
@@ -38,12 +37,8 @@ require (
github.com/mattn/go-colorable v0.1.13 // indirect
github.com/mattn/go-isatty v0.0.19 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.39.0 // indirect
github.com/prometheus/procfs v0.9.0 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
github.com/rjeczalik/notify v0.9.1 // indirect
github.com/rogpeppe/go-internal v1.10.0 // indirect
@@ -57,11 +52,10 @@ require (
github.com/tyler-smith/go-bip39 v1.1.0 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/crypto v0.12.0 // indirect
golang.org/x/sync v0.3.0 // indirect
golang.org/x/sys v0.11.0 // indirect
golang.org/x/crypto v0.11.0 // indirect
golang.org/x/sys v0.10.0 // indirect
golang.org/x/time v0.3.0 // indirect
google.golang.org/protobuf v1.31.0 // indirect
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

View File

@@ -2,8 +2,6 @@ github.com/VictoriaMetrics/fastcache v1.6.0 h1:C/3Oi3EiBCqufydp1neRZkqcwmEiuRT9c
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/agiledragon/gomonkey/v2 v2.9.0 h1:PDiKKybR596O6FHW+RVSG0Z7uGCBNbmbUXh3uCNQ7Hc=
github.com/agiledragon/gomonkey/v2 v2.9.0/go.mod h1:ap1AmDzcVOAz1YpeJ3TCzIgstoaWLA6jbbgxfB4w2iY=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
@@ -33,14 +31,8 @@ github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiU
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb h1:PBC98N2aIaM3XXiurYmW7fx4GZkL8feAMVq7nEjURHk=
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
@@ -70,8 +62,11 @@ github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlT
github.com/jtolds/gls v4.20.0+incompatible h1:xdiiI2gbIgH/gLH7ADydsJ1uDOEzR8yvV7C0MuV77Wo=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
@@ -82,8 +77,6 @@ github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APP
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.14 h1:+xnbZSEeDbOIg5/mE6JF0w6n9duR1l3/WmbinWVwUuU=
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
@@ -91,20 +84,14 @@ github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/orcaman/concurrent-map v1.0.0 h1:I/2A2XPCb4IuQWcQhBhSwGfiuybl/J0ev9HDbW65HOY=
github.com/orcaman/concurrent-map v1.0.0/go.mod h1:Lu3tH6HLW3feq74c2GC+jIMS/K2CFcDWnWD9XkenwhI=
github.com/orcaman/concurrent-map/v2 v2.0.1 h1:jOJ5Pg2w1oeB6PeDurIYf6k9PQ+aTITr/6lP/L/zp6c=
github.com/orcaman/concurrent-map/v2 v2.0.1/go.mod h1:9Eq3TG2oBe5FirmYWQfYO5iH1q0Jv47PLaNK++uCdOM=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI=
github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y=
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rivo/uniseg v0.4.4 h1:8TfxU8dW6PdqD27gjM8MVNuicgxIjxpm4K7x4jp8sis=
@@ -117,8 +104,8 @@ github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncj
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230812030736-25fe3ba69a28 h1:CECBTWhZ5NGAn8lGFB4ooRAYxZns8PXoX8kTR/14c04=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230812030736-25fe3ba69a28/go.mod h1:DiN3p2inoXOxGffxSswDKqWjQ7bU+Mp0c9v0XQXKmaA=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230804022247-26eeb40ea3ca h1:aQKsL9FiCPB6Eo3nuyxmw6aZpAhmFvJsRb7XGtXnOUc=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230804022247-26eeb40ea3ca/go.mod h1:DiN3p2inoXOxGffxSswDKqWjQ7bU+Mp0c9v0XQXKmaA=
github.com/scroll-tech/zktrie v0.6.0 h1:xLrMAO31Yo2BiPg1jtYKzcjpEFnXy8acbB7iIsyshPs=
github.com/scroll-tech/zktrie v0.6.0/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/shirou/gopsutil v3.21.11+incompatible h1:+1+c1VGhc88SSonWP6foOcLhvnKlUeu/erjjvaPEYiI=
@@ -149,13 +136,12 @@ github.com/yusufpapurcu/wmi v1.2.2/go.mod h1:SBZ9tNy3G9/m5Oi98Zks0QjeHVDvuK0qfxQ
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk=
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA=
golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0 h1:ftCYgMx6zT/asHUrPw8BLLscYtGznsLAnjq5RH9P66E=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
@@ -166,20 +152,16 @@ golang.org/x/sys v0.0.0-20190916202348-b4ddaad3f8a3/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc=
golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4=
golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4=
golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20190328211700-ab21143f2384/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7GC73Ei8pv4MzjDUNPHgQWJdtMAaDU=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=

View File

@@ -15,6 +15,10 @@ func TestConfig(t *testing.T) {
cfg, err := NewConfig("../../conf/config.json")
assert.NoError(t, err)
assert.Len(t, cfg.L1Config.RelayerConfig.MessageSenderPrivateKeys, 1)
assert.Len(t, cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys, 1)
assert.Len(t, cfg.L2Config.RelayerConfig.RollupSenderPrivateKeys, 1)
data, err := json.Marshal(cfg)
assert.NoError(t, err)

View File

@@ -28,9 +28,11 @@ type L2Config struct {
// ChunkProposerConfig loads chunk_proposer configuration items.
type ChunkProposerConfig struct {
MaxTxGasPerChunk uint64 `json:"max_tx_gas_per_chunk"`
MaxL2TxNumPerChunk uint64 `json:"max_l2_tx_num_per_chunk"`
MaxL1CommitGasPerChunk uint64 `json:"max_l1_commit_gas_per_chunk"`
MaxL1CommitCalldataSizePerChunk uint64 `json:"max_l1_commit_calldata_size_per_chunk"`
MinL1CommitCalldataSizePerChunk uint64 `json:"min_l1_commit_calldata_size_per_chunk"`
ChunkTimeoutSec uint64 `json:"chunk_timeout_sec"`
MaxRowConsumptionPerChunk uint64 `json:"max_row_consumption_per_chunk"`
GasCostIncreaseMultiplier float64 `json:"gas_cost_increase_multiplier"`
@@ -41,6 +43,7 @@ type BatchProposerConfig struct {
MaxChunkNumPerBatch uint64 `json:"max_chunk_num_per_batch"`
MaxL1CommitGasPerBatch uint64 `json:"max_l1_commit_gas_per_batch"`
MaxL1CommitCalldataSizePerBatch uint32 `json:"max_l1_commit_calldata_size_per_batch"`
MinChunkNumPerBatch uint64 `json:"min_chunk_num_per_batch"`
BatchTimeoutSec uint64 `json:"batch_timeout_sec"`
GasCostIncreaseMultiplier float64 `json:"gas_cost_increase_multiplier"`
}

View File

@@ -34,11 +34,12 @@ type SenderConfig struct {
// The interval (in seconds) to check balance and top up sender's accounts
CheckBalanceTime uint64 `json:"check_balance_time"`
// The sender's pending count limit.
PendingLimit int `json:"pending_limit"`
PendingLimit int `json:"pending_limit,omitempty"`
}
// RelayerConfig loads relayer configuration items.
// What we need to pay attention to is that
// `MessageSenderPrivateKeys` and `RollupSenderPrivateKeys` cannot have common private keys.
type RelayerConfig struct {
// RollupContractAddress store the rollup contract address.
RollupContractAddress common.Address `json:"rollup_contract_address,omitempty"`
@@ -55,10 +56,9 @@ type RelayerConfig struct {
// MessageRelayMinGasLimit to avoid OutOfGas error
MessageRelayMinGasLimit uint64 `json:"message_relay_min_gas_limit,omitempty"`
// The private key of the relayer
MessageSenderPrivateKey *ecdsa.PrivateKey `json:"-"`
GasOracleSenderPrivateKey *ecdsa.PrivateKey `json:"-"`
CommitSenderPrivateKey *ecdsa.PrivateKey `json:"-"`
FinalizeSenderPrivateKey *ecdsa.PrivateKey `json:"-"`
MessageSenderPrivateKeys []*ecdsa.PrivateKey `json:"-"`
GasOracleSenderPrivateKeys []*ecdsa.PrivateKey `json:"-"`
RollupSenderPrivateKeys []*ecdsa.PrivateKey `json:"-"`
}
// GasOracleConfig The config for updating gas price oracle.
@@ -72,61 +72,46 @@ type GasOracleConfig struct {
// relayerConfigAlias RelayerConfig alias name
type relayerConfigAlias RelayerConfig
func convertAndCheck(key string, uniqueAddressesSet map[string]struct{}) (*ecdsa.PrivateKey, error) {
if key == "" {
return nil, nil
}
privKey, err := crypto.ToECDSA(common.FromHex(key))
if err != nil {
return nil, err
}
addr := crypto.PubkeyToAddress(privKey.PublicKey).Hex()
if _, exists := uniqueAddressesSet[addr]; exists {
return nil, fmt.Errorf("detected duplicated address for private key: %s", addr)
}
uniqueAddressesSet[addr] = struct{}{}
return privKey, nil
}
// UnmarshalJSON unmarshal relayer_config struct.
func (r *RelayerConfig) UnmarshalJSON(input []byte) error {
var privateKeysConfig struct {
var jsonConfig struct {
relayerConfigAlias
MessageSenderPrivateKey string `json:"message_sender_private_key"`
GasOracleSenderPrivateKey string `json:"gas_oracle_sender_private_key"`
CommitSenderPrivateKey string `json:"commit_sender_private_key"`
FinalizeSenderPrivateKey string `json:"finalize_sender_private_key"`
// The private key of the relayer
MessageSenderPrivateKeys []string `json:"message_sender_private_keys"`
GasOracleSenderPrivateKeys []string `json:"gas_oracle_sender_private_keys"`
RollupSenderPrivateKeys []string `json:"rollup_sender_private_keys,omitempty"`
}
var err error
if err = json.Unmarshal(input, &privateKeysConfig); err != nil {
return fmt.Errorf("failed to unmarshal private keys config: %w", err)
if err := json.Unmarshal(input, &jsonConfig); err != nil {
return err
}
*r = RelayerConfig(privateKeysConfig.relayerConfigAlias)
*r = RelayerConfig(jsonConfig.relayerConfigAlias)
uniqueAddressesSet := make(map[string]struct{})
r.MessageSenderPrivateKey, err = convertAndCheck(privateKeysConfig.MessageSenderPrivateKey, uniqueAddressesSet)
if err != nil {
return fmt.Errorf("error converting and checking message sender private key: %w", err)
// Get messenger private key list.
for _, privStr := range jsonConfig.MessageSenderPrivateKeys {
priv, err := crypto.ToECDSA(common.FromHex(privStr))
if err != nil {
return fmt.Errorf("incorrect private_key_list format, err: %v", err)
}
r.MessageSenderPrivateKeys = append(r.MessageSenderPrivateKeys, priv)
}
r.GasOracleSenderPrivateKey, err = convertAndCheck(privateKeysConfig.GasOracleSenderPrivateKey, uniqueAddressesSet)
if err != nil {
return fmt.Errorf("error converting and checking gas oracle sender private key: %w", err)
// Get gas oracle private key list.
for _, privStr := range jsonConfig.GasOracleSenderPrivateKeys {
priv, err := crypto.ToECDSA(common.FromHex(privStr))
if err != nil {
return fmt.Errorf("incorrect private_key_list format, err: %v", err)
}
r.GasOracleSenderPrivateKeys = append(r.GasOracleSenderPrivateKeys, priv)
}
r.CommitSenderPrivateKey, err = convertAndCheck(privateKeysConfig.CommitSenderPrivateKey, uniqueAddressesSet)
if err != nil {
return fmt.Errorf("error converting and checking commit sender private key: %w", err)
}
r.FinalizeSenderPrivateKey, err = convertAndCheck(privateKeysConfig.FinalizeSenderPrivateKey, uniqueAddressesSet)
if err != nil {
return fmt.Errorf("error converting and checking finalize sender private key: %w", err)
// Get rollup sender private key list.
for _, privStr := range jsonConfig.RollupSenderPrivateKeys {
priv, err := crypto.ToECDSA(common.FromHex(privStr))
if err != nil {
return fmt.Errorf("incorrect prover_private_key format, err: %v", err)
}
r.RollupSenderPrivateKeys = append(r.RollupSenderPrivateKeys, priv)
}
return nil
@@ -134,20 +119,28 @@ func (r *RelayerConfig) UnmarshalJSON(input []byte) error {
// MarshalJSON marshal RelayerConfig config, transfer private keys.
func (r *RelayerConfig) MarshalJSON() ([]byte, error) {
privateKeysConfig := struct {
jsonConfig := struct {
relayerConfigAlias
// The private key of the relayer
MessageSenderPrivateKey string `json:"message_sender_private_key"`
GasOracleSenderPrivateKey string `json:"gas_oracle_sender_private_key"`
CommitSenderPrivateKey string `json:"commit_sender_private_key"`
FinalizeSenderPrivateKey string `json:"finalize_sender_private_key"`
}{}
MessageSenderPrivateKeys []string `json:"message_sender_private_keys"`
GasOracleSenderPrivateKeys []string `json:"gas_oracle_sender_private_keys,omitempty"`
RollupSenderPrivateKeys []string `json:"rollup_sender_private_keys,omitempty"`
}{relayerConfigAlias(*r), nil, nil, nil}
privateKeysConfig.relayerConfigAlias = relayerConfigAlias(*r)
privateKeysConfig.MessageSenderPrivateKey = common.Bytes2Hex(crypto.FromECDSA(r.MessageSenderPrivateKey))
privateKeysConfig.GasOracleSenderPrivateKey = common.Bytes2Hex(crypto.FromECDSA(r.GasOracleSenderPrivateKey))
privateKeysConfig.CommitSenderPrivateKey = common.Bytes2Hex(crypto.FromECDSA(r.CommitSenderPrivateKey))
privateKeysConfig.FinalizeSenderPrivateKey = common.Bytes2Hex(crypto.FromECDSA(r.FinalizeSenderPrivateKey))
// Transfer message sender private keys to hex type.
for _, priv := range r.MessageSenderPrivateKeys {
jsonConfig.MessageSenderPrivateKeys = append(jsonConfig.MessageSenderPrivateKeys, common.Bytes2Hex(crypto.FromECDSA(priv)))
}
return json.Marshal(&privateKeysConfig)
// Transfer gas oracle sender private keys to hex type.
for _, priv := range r.GasOracleSenderPrivateKeys {
jsonConfig.GasOracleSenderPrivateKeys = append(jsonConfig.GasOracleSenderPrivateKeys, common.Bytes2Hex(crypto.FromECDSA(priv)))
}
// Transfer rollup sender private keys to hex type.
for _, priv := range r.RollupSenderPrivateKeys {
jsonConfig.RollupSenderPrivateKeys = append(jsonConfig.RollupSenderPrivateKeys, common.Bytes2Hex(crypto.FromECDSA(priv)))
}
return json.Marshal(&jsonConfig)
}
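
In short, relayer configs now carry `message_sender_private_keys`, `gas_oracle_sender_private_keys`, and `rollup_sender_private_keys` as arrays of hex strings instead of four single-key fields. A minimal sketch of how each entry is decoded and re-encoded (the calls mirror the loops above; the key value is the throwaway test key that appears later in this diff, not a real secret):

    package main

    import (
        "fmt"

        "github.com/scroll-tech/go-ethereum/common"
        "github.com/scroll-tech/go-ethereum/crypto"
    )

    func main() {
        // One entry of message_sender_private_keys, as it would appear in JSON.
        privHex := "1212121212121212121212121212121212121212121212121212121212121212"

        // The same conversion UnmarshalJSON performs for every entry in each list.
        priv, err := crypto.ToECDSA(common.FromHex(privHex))
        if err != nil {
            panic(fmt.Errorf("incorrect private_key_list format, err: %v", err))
        }

        // MarshalJSON reverses it with crypto.FromECDSA + common.Bytes2Hex.
        fmt.Println("address:", crypto.PubkeyToAddress(priv.PublicKey).Hex())
        fmt.Println("round-tripped key:", common.Bytes2Hex(crypto.FromECDSA(priv)))
    }

The removed convertAndCheck helper also rejected a key whose address duplicated one already seen; the new per-list loops decode entries without that cross-list check.
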

View File

@@ -3,16 +3,17 @@ package relayer
import (
"context"
"errors"
"fmt"
"math/big"
"github.com/prometheus/client_golang/prometheus"
// not sure if this will cause problems when relaying with l1geth
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/log"
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
"gorm.io/gorm"
"scroll-tech/common/metrics"
"scroll-tech/common/types"
bridgeAbi "scroll-tech/bridge/abi"
@@ -21,6 +22,11 @@ import (
"scroll-tech/bridge/internal/orm"
)
var (
bridgeL1MsgsRelayedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/relayed/total", metrics.ScrollRegistry)
bridgeL1MsgsRelayedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/relayed/confirmed/total", metrics.ScrollRegistry)
)
// Layer1Relayer is responsible for
// 1. fetch pending L1Message from db
// 2. relay pending message to layer 2 node
@@ -47,21 +53,23 @@ type Layer1Relayer struct {
l1MessageOrm *orm.L1Message
l1BlockOrm *orm.L1Block
metrics *l1RelayerMetrics
}
// NewLayer1Relayer will return a new instance of Layer1RelayerClient
func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfig, reg prometheus.Registerer) (*Layer1Relayer, error) {
messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKey, "l1_relayer", "message_sender", reg)
func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfig) (*Layer1Relayer, error) {
messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKeys)
if err != nil {
addr := crypto.PubkeyToAddress(cfg.MessageSenderPrivateKey.PublicKey)
return nil, fmt.Errorf("new message sender failed for address %s, err: %v", addr.Hex(), err)
addr := crypto.PubkeyToAddress(cfg.MessageSenderPrivateKeys[0].PublicKey)
log.Error("new MessageSender failed", "main address", addr.String(), "err", err)
return nil, err
}
gasOracleSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderPrivateKey, "l1_relayer", "gas_oracle_sender", reg)
// @todo make sure only one sender is available
gasOracleSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderPrivateKeys)
if err != nil {
addr := crypto.PubkeyToAddress(cfg.GasOracleSenderPrivateKey.PublicKey)
return nil, fmt.Errorf("new gas oracle sender failed for address %s, err: %v", addr.Hex(), err)
addr := crypto.PubkeyToAddress(cfg.GasOracleSenderPrivateKeys[0].PublicKey)
log.Error("new GasOracleSender failed", "main address", addr.String(), "err", err)
return nil, err
}
var minGasPrice uint64
@@ -80,7 +88,6 @@ func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfi
}
l1Relayer := &Layer1Relayer{
cfg: cfg,
ctx: ctx,
l1MessageOrm: orm.NewL1Message(db),
l1BlockOrm: orm.NewL1Block(db),
@@ -95,9 +102,9 @@ func NewLayer1Relayer(ctx context.Context, db *gorm.DB, cfg *config.RelayerConfi
minGasPrice: minGasPrice,
gasPriceDiff: gasPriceDiff,
}
l1Relayer.metrics = initL1RelayerMetrics(reg)
cfg: cfg,
}
go l1Relayer.handleConfirmLoop(ctx)
return l1Relayer, nil
@@ -118,9 +125,7 @@ func (r *Layer1Relayer) ProcessSavedEvents() {
for _, msg := range msgs {
tmpMsg := msg
r.metrics.bridgeL1RelayedMsgsTotal.Inc()
if err = r.processSavedEvent(&tmpMsg); err != nil {
r.metrics.bridgeL1RelayedMsgsFailureTotal.Inc()
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
log.Error("failed to process event", "msg.msgHash", msg.MsgHash, "err", err)
}
@@ -142,6 +147,7 @@ func (r *Layer1Relayer) processSavedEvent(msg *orm.L1Message) error {
if err != nil {
return err
}
bridgeL1MsgsRelayedTotalCounter.Inc(1)
log.Info("relayMessage to layer2", "msg hash", msg.MsgHash, "tx hash", hash)
err = r.l1MessageOrm.UpdateLayer1StatusAndLayer2Hash(r.ctx, msg.MsgHash, types.MsgSubmitted, hash.String())
@@ -153,7 +159,6 @@ func (r *Layer1Relayer) processSavedEvent(msg *orm.L1Message) error {
// ProcessGasPriceOracle imports gas price to layer2
func (r *Layer1Relayer) ProcessGasPriceOracle() {
r.metrics.bridgeL1RelayerGasPriceOraclerRunTotal.Inc()
latestBlockHeight, err := r.l1BlockOrm.GetLatestL1BlockHeight(r.ctx)
if err != nil {
log.Warn("Failed to fetch latest L1 block height from db", "err", err)
@@ -198,7 +203,6 @@ func (r *Layer1Relayer) ProcessGasPriceOracle() {
return
}
r.lastGasPrice = block.BaseFee
r.metrics.bridgeL1RelayerLastGasPrice.Set(float64(r.lastGasPrice))
log.Info("Update l1 base fee", "txHash", hash.String(), "baseFee", baseFee)
}
}
@@ -210,7 +214,7 @@ func (r *Layer1Relayer) handleConfirmLoop(ctx context.Context) {
case <-ctx.Done():
return
case cfm := <-r.messageSender.ConfirmChan():
r.metrics.bridgeL1MsgsRelayedConfirmedTotal.Inc()
bridgeL1MsgsRelayedConfirmedTotalCounter.Inc(1)
if !cfm.IsSuccessful {
err := r.l1MessageOrm.UpdateLayer1StatusAndLayer2Hash(r.ctx, cfm.ID, types.MsgRelayFailed, cfm.TxHash.String())
if err != nil {
@@ -226,7 +230,6 @@ func (r *Layer1Relayer) handleConfirmLoop(ctx context.Context) {
log.Info("transaction confirmed in layer2", "confirmation", cfm)
}
case cfm := <-r.gasOracleSender.ConfirmChan():
r.metrics.bridgeL1GasOraclerConfirmedTotal.Inc()
if !cfm.IsSuccessful {
// @discuss: maybe make it pending again?
err := r.l1BlockOrm.UpdateL1GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())

View File

@@ -1,54 +0,0 @@
package relayer
import (
"sync"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
type l1RelayerMetrics struct {
bridgeL1RelayedMsgsTotal prometheus.Counter
bridgeL1RelayedMsgsFailureTotal prometheus.Counter
bridgeL1RelayerGasPriceOraclerRunTotal prometheus.Counter
bridgeL1RelayerLastGasPrice prometheus.Gauge
bridgeL1MsgsRelayedConfirmedTotal prometheus.Counter
bridgeL1GasOraclerConfirmedTotal prometheus.Counter
}
var (
initL1RelayerMetricOnce sync.Once
l1RelayerMetric *l1RelayerMetrics
)
func initL1RelayerMetrics(reg prometheus.Registerer) *l1RelayerMetrics {
initL1RelayerMetricOnce.Do(func() {
l1RelayerMetric = &l1RelayerMetrics{
bridgeL1RelayedMsgsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer1_msg_relayed_total",
Help: "The total number of the l1 relayed message.",
}),
bridgeL1RelayedMsgsFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer1_msg_relayed_failure_total",
Help: "The total number of the l1 relayed failure message.",
}),
bridgeL1MsgsRelayedConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer1_relayed_confirmed_total",
Help: "The total number of layer1 relayed confirmed",
}),
bridgeL1RelayerGasPriceOraclerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer1_gas_price_oracler_total",
Help: "The total number of layer1 gas price oracler run total",
}),
bridgeL1RelayerLastGasPrice: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_layer1_gas_price_latest_gas_price",
Help: "The latest gas price of bridge relayer l1",
}),
bridgeL1GasOraclerConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer1_gas_oracler_confirmed_total",
Help: "The total number of layer1 relayed confirmed",
}),
}
})
return l1RelayerMetric
}
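
This prometheus/promauto metrics struct is removed; its counterparts in the new code are the package-level go-ethereum counters registered at the top of the L1 relayer (the `bridgeL1Msgs...` vars above). A minimal sketch of that replacement pattern, with an illustrative counter name rather than one taken from this diff:

    package relayer

    import (
        gethMetrics "github.com/scroll-tech/go-ethereum/metrics"

        "scroll-tech/common/metrics"
    )

    // Registered once at package load and incremented inline where the event
    // happens, instead of going through an initL1RelayerMetrics(reg) constructor.
    var exampleRelayedCounter = gethMetrics.NewRegisteredCounter(
        "bridge/l1/msgs/example/total", metrics.ScrollRegistry)

    func recordExampleRelay() {
        exampleRelayedCounter.Inc(1)
    }
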

View File

@@ -62,7 +62,7 @@ func setupL1RelayerDB(t *testing.T) *gorm.DB {
func testCreateNewL1Relayer(t *testing.T) {
db := setupL1RelayerDB(t)
defer database.CloseDB(db)
relayer, err := NewLayer1Relayer(context.Background(), db, cfg.L2Config.RelayerConfig, nil)
relayer, err := NewLayer1Relayer(context.Background(), db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
assert.NotNil(t, relayer)
}
@@ -72,7 +72,7 @@ func testL1RelayerProcessSaveEvents(t *testing.T) {
defer database.CloseDB(db)
l1MessageOrm := orm.NewL1Message(db)
l1Cfg := cfg.L1Config
relayer, err := NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig, nil)
relayer, err := NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
assert.NoError(t, err)
assert.NotNil(t, relayer)
assert.NoError(t, l1MessageOrm.SaveL1Messages(context.Background(), templateL1Message))
@@ -99,7 +99,7 @@ func testL1RelayerMsgConfirm(t *testing.T) {
l1Cfg := cfg.L1Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig, nil)
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig)
assert.NoError(t, err)
// Simulate message confirmations.
@@ -138,7 +138,7 @@ func testL1RelayerGasOracleConfirm(t *testing.T) {
l1Cfg := cfg.L1Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig, nil)
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig)
assert.NoError(t, err)
// Simulate message confirmations.
@@ -168,7 +168,7 @@ func testL1RelayerProcessGasPriceOracle(t *testing.T) {
l1Cfg := cfg.L1Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig, nil)
l1Relayer, err := NewLayer1Relayer(ctx, db, l1Cfg.RelayerConfig)
assert.NoError(t, err)
assert.NotNil(t, l1Relayer)

View File

@@ -8,15 +8,15 @@ import (
"sync"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
"gorm.io/gorm"
"scroll-tech/common/metrics"
"scroll-tech/common/types"
bridgeAbi "scroll-tech/bridge/abi"
@@ -25,6 +25,13 @@ import (
"scroll-tech/bridge/internal/orm"
)
var (
bridgeL2BatchesFinalizedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/finalized/total", metrics.ScrollRegistry)
bridgeL2BatchesCommittedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/committed/total", metrics.ScrollRegistry)
bridgeL2BatchesFinalizedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/finalized/confirmed/total", metrics.ScrollRegistry)
bridgeL2BatchesCommittedConfirmedTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/batches/committed/confirmed/total", metrics.ScrollRegistry)
)
// Layer2Relayer is responsible for
// 1. Committing and finalizing L2 blocks on L1
// 2. Relaying messages from L2 to L1
@@ -46,9 +53,8 @@ type Layer2Relayer struct {
messageSender *sender.Sender
l1MessengerABI *abi.ABI
commitSender *sender.Sender
finalizeSender *sender.Sender
l1RollupABI *abi.ABI
rollupSender *sender.Sender
l1RollupABI *abi.ABI
gasOracleSender *sender.Sender
l2GasOracleABI *abi.ABI
@@ -70,33 +76,27 @@ type Layer2Relayer struct {
// A list of processing batch finalization.
// key(string): confirmation ID, value(string): batch hash.
processingFinalization sync.Map
metrics *l2RelayerMetrics
}
// NewLayer2Relayer will return a new instance of Layer2RelayerClient
func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.DB, cfg *config.RelayerConfig, initGenesis bool, reg prometheus.Registerer) (*Layer2Relayer, error) {
messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKey, "l2_relayer", "message_sender", reg)
func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.DB, cfg *config.RelayerConfig, initGenesis bool) (*Layer2Relayer, error) {
// @todo use different sender for relayer, block commit and proof finalize
messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKeys)
if err != nil {
addr := crypto.PubkeyToAddress(cfg.MessageSenderPrivateKey.PublicKey)
return nil, fmt.Errorf("new message sender failed for address %s, err: %w", addr.Hex(), err)
log.Error("Failed to create messenger sender", "err", err)
return nil, err
}
commitSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.CommitSenderPrivateKey, "l2_relayer", "commit_sender", reg)
rollupSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.RollupSenderPrivateKeys)
if err != nil {
addr := crypto.PubkeyToAddress(cfg.CommitSenderPrivateKey.PublicKey)
return nil, fmt.Errorf("new commit sender failed for address %s, err: %w", addr.Hex(), err)
}
finalizeSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.FinalizeSenderPrivateKey, "l2_relayer", "finalize_sender", reg)
if err != nil {
addr := crypto.PubkeyToAddress(cfg.FinalizeSenderPrivateKey.PublicKey)
return nil, fmt.Errorf("new finalize sender failed for address %s, err: %w", addr.Hex(), err)
log.Error("Failed to create rollup sender", "err", err)
return nil, err
}
gasOracleSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderPrivateKey, "l2_relayer", "gas_oracle_sender", reg)
gasOracleSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.GasOracleSenderPrivateKeys)
if err != nil {
addr := crypto.PubkeyToAddress(cfg.GasOracleSenderPrivateKey.PublicKey)
return nil, fmt.Errorf("new gas oracle sender failed for address %s, err: %w", addr.Hex(), err)
log.Error("Failed to create gas oracle sender", "err", err)
return nil, err
}
var minGasPrice uint64
@@ -127,9 +127,8 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
messageSender: messageSender,
l1MessengerABI: bridgeAbi.L1ScrollMessengerABI,
commitSender: commitSender,
finalizeSender: finalizeSender,
l1RollupABI: bridgeAbi.ScrollChainABI,
rollupSender: rollupSender,
l1RollupABI: bridgeAbi.ScrollChainABI,
gasOracleSender: gasOracleSender,
l2GasOracleABI: bridgeAbi.L2GasPriceOracleABI,
@@ -151,7 +150,6 @@ func NewLayer2Relayer(ctx context.Context, l2Client *ethclient.Client, db *gorm.
return nil, fmt.Errorf("failed to initialize and commit genesis batch, err: %v", err)
}
}
layer2Relayer.metrics = initL2RelayerMetrics(reg)
go layer2Relayer.handleConfirmLoop(ctx)
return layer2Relayer, nil
@@ -232,7 +230,7 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
}
// submit genesis batch to L1 rollup contract
txHash, err := r.commitSender.SendTransaction(batchHash, &r.cfg.RollupContractAddress, big.NewInt(0), calldata, 0)
txHash, err := r.rollupSender.SendTransaction(batchHash, &r.cfg.RollupContractAddress, big.NewInt(0), calldata, 0)
if err != nil {
return fmt.Errorf("failed to send import genesis batch tx to L1, error: %v", err)
}
@@ -247,14 +245,14 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
select {
// print progress
case <-ticker.C:
log.Info("Waiting for confirmation")
log.Info("Waiting for confirmation", "pending count", r.rollupSender.PendingCount())
// timeout
case <-time.After(5 * time.Minute):
return fmt.Errorf("import genesis timeout after 5 minutes, original txHash: %v", txHash.String())
// handle confirmation
case confirmation := <-r.commitSender.ConfirmChan():
case confirmation := <-r.rollupSender.ConfirmChan():
if confirmation.ID != batchHash {
return fmt.Errorf("unexpected import genesis confirmation id, expected: %v, got: %v", batchHash, confirmation.ID)
}
@@ -269,7 +267,6 @@ func (r *Layer2Relayer) commitGenesisBatch(batchHash string, batchHeader []byte,
// ProcessGasPriceOracle imports gas price to layer1
func (r *Layer2Relayer) ProcessGasPriceOracle() {
r.metrics.bridgeL2RelayerGasPriceOraclerRunTotal.Inc()
batch, err := r.batchOrm.GetLatestBatch(r.ctx)
if batch == nil || err != nil {
log.Error("Failed to GetLatestBatch", "batch", batch, "err", err)
@@ -307,7 +304,6 @@ func (r *Layer2Relayer) ProcessGasPriceOracle() {
return
}
r.lastGasPrice = suggestGasPriceUint64
r.metrics.bridgeL2RelayerLastGasPrice.Set(float64(r.lastGasPrice))
log.Info("Update l2 gas price", "txHash", hash.String(), "GasPrice", suggestGasPrice)
}
}
@@ -316,13 +312,12 @@ func (r *Layer2Relayer) ProcessGasPriceOracle() {
// ProcessPendingBatches processes the pending batches by sending commitBatch transactions to layer 1.
func (r *Layer2Relayer) ProcessPendingBatches() {
// get pending batches from database in ascending order by their index.
pendingBatches, err := r.batchOrm.GetPendingBatches(r.ctx, 1)
pendingBatches, err := r.batchOrm.GetPendingBatches(r.ctx, 10)
if err != nil {
log.Error("Failed to fetch pending L2 batches", "err", err)
return
}
for _, batch := range pendingBatches {
r.metrics.bridgeL2RelayerProcessPendingBatchTotal.Inc()
// get current header and parent header.
currentBatchHeader, err := types.DecodeBatchHeader(batch.BatchHeader)
if err != nil {
@@ -379,16 +374,11 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
// send transaction
txID := batch.Hash + "-commit"
txHash, err := r.commitSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), calldata, 0)
txHash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), calldata, 0)
if err != nil {
log.Error(
"Failed to send commitBatch tx to layer1",
"index", batch.Index,
"hash", batch.Hash,
"RollupContractAddress", r.cfg.RollupContractAddress,
"calldata", common.Bytes2Hex(calldata),
"err", err,
)
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
log.Error("Failed to send commitBatch tx to layer1 ", "err", err)
}
return
}
@@ -397,7 +387,7 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
log.Error("UpdateCommitTxHashAndRollupStatus failed", "hash", batch.Hash, "index", batch.Index, "err", err)
return
}
r.metrics.bridgeL2RelayerProcessPendingBatchSuccessTotal.Inc()
bridgeL2BatchesCommittedTotalCounter.Inc(1)
r.processingCommitment.Store(txID, batch.Hash)
log.Info("Sent the commitBatch tx to layer1", "batch index", batch.Index, "batch hash", batch.Hash, "tx hash", txHash.Hex())
}
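
The commit path above also shows the new caller-side error contract: `ErrNoAvailableAccount` and `ErrFullPending` are treated as transient back-pressure and skipped quietly, with the batch retried on the next tick. A compact sketch of that contract in isolation (the helper name and the sender import path are assumptions, not taken from this diff):

    package relayer

    import (
        "errors"
        "math/big"

        "github.com/scroll-tech/go-ethereum/common"
        "github.com/scroll-tech/go-ethereum/log"

        "scroll-tech/bridge/internal/controller/sender" // assumed import path
    )

    // trySendCommit is a hypothetical helper showing how callers distinguish real
    // failures from the pool being momentarily busy or the pending queue full.
    func trySendCommit(s *sender.Sender, batchHash string, rollupAddr common.Address, calldata []byte) (common.Hash, bool) {
        txHash, err := s.SendTransaction(batchHash+"-commit", &rollupAddr, big.NewInt(0), calldata, 0)
        if err != nil {
            if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
                log.Error("Failed to send commitBatch tx to layer1", "err", err)
            }
            return common.Hash{}, false
        }
        return txHash, true
    }
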
@@ -421,8 +411,6 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
return
}
r.metrics.bridgeL2RelayerProcessCommittedBatchesTotal.Inc()
batch := batches[0]
hash := batch.Hash
status := types.ProvingStatus(batch.ProvingStatus)
@@ -430,9 +418,13 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
case types.ProvingTaskUnassigned, types.ProvingTaskAssigned:
// The proof for this block is not ready yet.
return
case types.ProvingTaskProved:
// It's an intermediate state. The prover manager received the proof but has not verified
// the proof yet. We don't roll up the proof until it's verified.
return
case types.ProvingTaskVerified:
log.Info("Start to roll up zk proof", "hash", hash)
r.metrics.bridgeL2RelayerProcessCommittedBatchesFinalizedTotal.Inc()
success := false
var parentBatchStateRoot string
if batch.Index > 0 {
@@ -446,14 +438,24 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
parentBatchStateRoot = parentBatch.StateRoot
}
defer func() {
// TODO: need to revisit this and have a more fine-grained error handling
if !success {
log.Info("Failed to upload the proof, change rollup status to RollupFinalizeFailed", "hash", hash)
if err = r.batchOrm.UpdateRollupStatus(r.ctx, hash, types.RollupFinalizeFailed); err != nil {
log.Warn("UpdateRollupStatus failed", "hash", hash, "err", err)
}
}
}()
aggProof, err := r.batchOrm.GetVerifiedProofByHash(r.ctx, hash)
if err != nil {
log.Error("get verified proof by hash failed", "hash", hash, "err", err)
log.Warn("get verified proof by hash failed", "hash", hash, "err", err)
return
}
if err = aggProof.SanityCheck(); err != nil {
log.Error("agg_proof sanity check fails", "hash", hash, "error", err)
log.Warn("agg_proof sanity check fails", "hash", hash, "error", err)
return
}
@@ -472,55 +474,27 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
txID := hash + "-finalize"
// add suffix `-finalize` to avoid duplication with commit tx in unit tests
txHash, err := r.finalizeSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), data, 0)
txHash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), data, 0)
finalizeTxHash := &txHash
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) && !errors.Is(err, sender.ErrFullPending) {
// This can happen normally if we try to finalize 2 or more
// batches around the same time. The 2nd tx might fail since
// the client does not see the 1st tx's updates at this point.
// TODO: add more fine-grained error handling
log.Error(
"finalizeBatchWithProof in layer1 failed",
"index", batch.Index,
"hash", batch.Hash,
"RollupContractAddress", r.cfg.RollupContractAddress,
"calldata", common.Bytes2Hex(data),
"err", err,
)
log.Error("finalizeBatchWithProof in layer1 failed",
"index", batch.Index, "hash", batch.Hash, "err", err)
}
return
}
bridgeL2BatchesFinalizedTotalCounter.Inc(1)
log.Info("finalizeBatchWithProof in layer1", "index", batch.Index, "batch hash", batch.Hash, "tx hash", hash)
// record and sync with db, @todo handle db error
err = r.batchOrm.UpdateFinalizeTxHashAndRollupStatus(r.ctx, hash, finalizeTxHash.String(), types.RollupFinalizing)
if err != nil {
log.Error("UpdateFinalizeTxHashAndRollupStatus failed",
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed",
"index", batch.Index, "batch hash", batch.Hash,
"tx hash", finalizeTxHash.String(), "err", err)
}
success = true
r.processingFinalization.Store(txID, hash)
r.metrics.bridgeL2RelayerProcessCommittedBatchesFinalizedSuccessTotal.Inc()
case types.ProvingTaskFailed:
// We were unable to prove this batch. There are two possibilities:
// (a) Prover bug. In this case, we should fix and redeploy the prover.
// In the meantime, we continue to commit batches to L1 as well as
// proposing and proving chunks and batches.
// (b) Unprovable batch, e.g. proof overflow. In this case we need to
// stop the ledger, fix the limit, revert all the violating blocks,
// chunks and batches and all subsequent ones, and resume, i.e. this
// case requires manual resolution.
log.Error(
"batch proving failed",
"Index", batch.Index,
"Hash", batch.Hash,
"ProverAssignedAt", batch.ProverAssignedAt,
"ProvedAt", batch.ProvedAt,
"ProofTimeSec", batch.ProofTimeSec,
)
return
default:
log.Error("encounter unreachable case in ProcessCommittedBatches", "proving status", status)
@@ -546,7 +520,7 @@ func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
"batch hash", batchHash.(string),
"tx hash", confirmation.TxHash.String(), "err", err)
}
r.metrics.bridgeL2BatchesCommittedConfirmedTotal.Inc()
bridgeL2BatchesCommittedConfirmedTotalCounter.Inc(1)
r.processingCommitment.Delete(confirmation.ID)
}
@@ -568,7 +542,7 @@ func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
"batch hash", batchHash.(string),
"tx hash", confirmation.TxHash.String(), "err", err)
}
r.metrics.bridgeL2BatchesFinalizedConfirmedTotal.Inc()
bridgeL2BatchesFinalizedConfirmedTotalCounter.Inc(1)
r.processingFinalization.Delete(confirmation.ID)
}
log.Info("transaction confirmed in layer1", "type", transactionType, "confirmation", confirmation)
@@ -581,12 +555,9 @@ func (r *Layer2Relayer) handleConfirmLoop(ctx context.Context) {
return
case confirmation := <-r.messageSender.ConfirmChan():
r.handleConfirmation(confirmation)
case confirmation := <-r.commitSender.ConfirmChan():
r.handleConfirmation(confirmation)
case confirmation := <-r.finalizeSender.ConfirmChan():
case confirmation := <-r.rollupSender.ConfirmChan():
r.handleConfirmation(confirmation)
case cfm := <-r.gasOracleSender.ConfirmChan():
r.metrics.bridgeL2BatchesGasOraclerConfirmedTotal.Inc()
if !cfm.IsSuccessful {
// @discuss: maybe make it pending again?
err := r.batchOrm.UpdateL2GasOracleStatusAndOracleTxHash(r.ctx, cfm.ID, types.GasOracleFailed, cfm.TxHash.String())

View File

@@ -1,74 +0,0 @@
package relayer
import (
"sync"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
type l2RelayerMetrics struct {
bridgeL2RelayerProcessPendingBatchTotal prometheus.Counter
bridgeL2RelayerProcessPendingBatchSuccessTotal prometheus.Counter
bridgeL2RelayerGasPriceOraclerRunTotal prometheus.Counter
bridgeL2RelayerLastGasPrice prometheus.Gauge
bridgeL2RelayerProcessCommittedBatchesTotal prometheus.Counter
bridgeL2RelayerProcessCommittedBatchesFinalizedTotal prometheus.Counter
bridgeL2RelayerProcessCommittedBatchesFinalizedSuccessTotal prometheus.Counter
bridgeL2BatchesCommittedConfirmedTotal prometheus.Counter
bridgeL2BatchesFinalizedConfirmedTotal prometheus.Counter
bridgeL2BatchesGasOraclerConfirmedTotal prometheus.Counter
}
var (
initL2RelayerMetricOnce sync.Once
l2RelayerMetric *l2RelayerMetrics
)
func initL2RelayerMetrics(reg prometheus.Registerer) *l2RelayerMetrics {
initL2RelayerMetricOnce.Do(func() {
l2RelayerMetric = &l2RelayerMetrics{
bridgeL2RelayerProcessPendingBatchTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_process_pending_batch_total",
Help: "The total number of layer2 process pending batch",
}),
bridgeL2RelayerProcessPendingBatchSuccessTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_process_pending_batch_success_total",
Help: "The total number of layer2 process pending success batch",
}),
bridgeL2RelayerGasPriceOraclerRunTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_gas_price_oracler_total",
Help: "The total number of layer2 gas price oracler run total",
}),
bridgeL2RelayerLastGasPrice: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_layer2_gas_price_latest_gas_price",
Help: "The latest gas price of bridge relayer l2",
}),
bridgeL2RelayerProcessCommittedBatchesTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_process_committed_batches_total",
Help: "The total number of layer2 process committed batches run total",
}),
bridgeL2RelayerProcessCommittedBatchesFinalizedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_process_committed_batches_finalized_total",
Help: "The total number of layer2 process committed batches finalized total",
}),
bridgeL2RelayerProcessCommittedBatchesFinalizedSuccessTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_process_committed_batches_finalized_success_total",
Help: "The total number of layer2 process committed batches finalized success total",
}),
bridgeL2BatchesCommittedConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_process_committed_batches_confirmed_total",
Help: "The total number of layer2 process committed batches confirmed total",
}),
bridgeL2BatchesFinalizedConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_process_finalized_batches_confirmed_total",
Help: "The total number of layer2 process finalized batches confirmed total",
}),
bridgeL2BatchesGasOraclerConfirmedTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_layer2_process_gras_oracler_confirmed_total",
Help: "The total number of layer2 process finalized batches confirmed total",
}),
}
})
return l2RelayerMetric
}

View File

@@ -35,7 +35,7 @@ func setupL2RelayerDB(t *testing.T) *gorm.DB {
func testCreateNewRelayer(t *testing.T) {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false, nil)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false)
assert.NoError(t, err)
assert.NotNil(t, relayer)
}
@@ -45,7 +45,7 @@ func testL2RelayerProcessPendingBatches(t *testing.T) {
defer database.CloseDB(db)
l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false, nil)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false)
assert.NoError(t, err)
l2BlockOrm := orm.NewL2Block(db)
@@ -73,7 +73,7 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
defer database.CloseDB(db)
l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false, nil)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, l2Cfg.RelayerConfig, false)
assert.NoError(t, err)
batchOrm := orm.NewBatch(db)
batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*types.Chunk{chunk1, chunk2})
@@ -90,9 +90,10 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batch.Hash})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
// no valid proof, rollup status remains the same
assert.Equal(t, types.RollupCommitted, statuses[0])
assert.Equal(t, types.RollupFinalizeFailed, statuses[0])
err = batchOrm.UpdateRollupStatus(context.Background(), batch.Hash, types.RollupCommitted)
assert.NoError(t, err)
proof := &message.BatchProof{
Proof: []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31},
}
@@ -106,7 +107,7 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
assert.Equal(t, types.RollupFinalizing, statuses[0])
}
func testL2RelayerCommitConfirm(t *testing.T) {
func testL2RelayerRollupConfirm(t *testing.T) {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
@@ -114,12 +115,12 @@ func testL2RelayerCommitConfirm(t *testing.T) {
l2Cfg := cfg.L2Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false, nil)
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false)
assert.NoError(t, err)
// Simulate message confirmations.
processingKeys := []string{"committed-1", "committed-2"}
isSuccessful := []bool{true, false}
processingKeys := []string{"committed-1", "committed-2", "finalized-1", "finalized-2"}
isSuccessful := []bool{true, false, true, false}
batchOrm := orm.NewBatch(db)
batchHashes := make([]string, len(processingKeys))
@@ -129,68 +130,29 @@ func testL2RelayerCommitConfirm(t *testing.T) {
batchHashes[i] = batch.Hash
}
for i, key := range processingKeys {
for i, key := range processingKeys[:2] {
l2Relayer.processingCommitment.Store(key, batchHashes[i])
l2Relayer.commitSender.SendConfirmation(&sender.Confirmation{
l2Relayer.rollupSender.SendConfirmation(&sender.Confirmation{
ID: key,
IsSuccessful: isSuccessful[i],
TxHash: common.HexToHash("0x123456789abcdef"),
})
}
for i, key := range processingKeys[2:] {
l2Relayer.processingFinalization.Store(key, batchHashes[i+2])
l2Relayer.rollupSender.SendConfirmation(&sender.Confirmation{
ID: key,
IsSuccessful: isSuccessful[i+2],
TxHash: common.HexToHash("0x123456789abcdef"),
})
}
// Check the database for the updated status using TryTimes.
ok := utils.TryTimes(5, func() bool {
expectedStatuses := []types.RollupStatus{
types.RollupCommitted,
types.RollupCommitFailed,
}
for i, batchHash := range batchHashes {
batchInDB, err := batchOrm.GetBatches(context.Background(), map[string]interface{}{"hash": batchHash}, nil, 0)
if err != nil || len(batchInDB) != 1 || types.RollupStatus(batchInDB[0].RollupStatus) != expectedStatuses[i] {
return false
}
}
return true
})
assert.True(t, ok)
}
func testL2RelayerFinalizeConfirm(t *testing.T) {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
// Create and set up the Layer2 Relayer.
l2Cfg := cfg.L2Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false, nil)
assert.NoError(t, err)
// Simulate message confirmations.
processingKeys := []string{"finalized-1", "finalized-2"}
isSuccessful := []bool{true, false}
batchOrm := orm.NewBatch(db)
batchHashes := make([]string, len(processingKeys))
for i := range batchHashes {
batch, err := batchOrm.InsertBatch(context.Background(), 0, 1, chunkHash1.Hex(), chunkHash2.Hex(), []*types.Chunk{chunk1, chunk2})
assert.NoError(t, err)
batchHashes[i] = batch.Hash
}
for i, key := range processingKeys {
l2Relayer.processingFinalization.Store(key, batchHashes[i])
l2Relayer.finalizeSender.SendConfirmation(&sender.Confirmation{
ID: key,
IsSuccessful: isSuccessful[i],
TxHash: common.HexToHash("0x123456789abcdef"),
})
}
// Check the database for the updated status using TryTimes.
ok := utils.TryTimes(5, func() bool {
expectedStatuses := []types.RollupStatus{
types.RollupFinalized,
types.RollupFinalizeFailed,
}
@@ -221,7 +183,7 @@ func testL2RelayerGasOracleConfirm(t *testing.T) {
l2Cfg := cfg.L2Config
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false, nil)
l2Relayer, err := NewLayer2Relayer(ctx, l2Cli, db, l2Cfg.RelayerConfig, false)
assert.NoError(t, err)
// Simulate message confirmations.
@@ -259,7 +221,7 @@ func testLayer2RelayerProcessGasPriceOracle(t *testing.T) {
db := setupL2RelayerDB(t)
defer database.CloseDB(db)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false, nil)
relayer, err := NewLayer2Relayer(context.Background(), l2Cli, db, cfg.L2Config.RelayerConfig, false)
assert.NoError(t, err)
assert.NotNil(t, relayer)

View File

@@ -97,8 +97,7 @@ func TestFunctions(t *testing.T) {
t.Run("TestCreateNewRelayer", testCreateNewRelayer)
t.Run("TestL2RelayerProcessPendingBatches", testL2RelayerProcessPendingBatches)
t.Run("TestL2RelayerProcessCommittedBatches", testL2RelayerProcessCommittedBatches)
t.Run("TestL2RelayerCommitConfirm", testL2RelayerCommitConfirm)
t.Run("TestL2RelayerFinalizeConfirm", testL2RelayerFinalizeConfirm)
t.Run("TestL2RelayerRollupConfirm", testL2RelayerRollupConfirm)
t.Run("TestL2RelayerGasOracleConfirm", testL2RelayerGasOracleConfirm)
t.Run("TestLayer2RelayerProcessGasPriceOracle", testLayer2RelayerProcessGasPriceOracle)
}

View File

@@ -0,0 +1,165 @@
package sender
import (
"context"
"crypto/ecdsa"
"fmt"
"math/big"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
)
type accountPool struct {
client *ethclient.Client
minBalance *big.Int
accounts map[common.Address]*bind.TransactOpts
accsCh chan *bind.TransactOpts
}
// newAccountPool creates an accountPool instance.
func newAccountPool(ctx context.Context, minBalance *big.Int, client *ethclient.Client, privs []*ecdsa.PrivateKey) (*accountPool, error) {
accs := &accountPool{
client: client,
minBalance: minBalance,
accounts: make(map[common.Address]*bind.TransactOpts, len(privs)),
accsCh: make(chan *bind.TransactOpts, len(privs)+2),
}
// get chainID from client
chainID, err := client.ChainID(ctx)
if err != nil {
return nil, err
}
for _, priv := range privs {
auth, err := bind.NewKeyedTransactorWithChainID(priv, chainID)
if err != nil {
log.Error("failed to create account", "chainID", chainID.String(), "err", err)
return nil, err
}
// Set pending nonce
nonce, err := client.PendingNonceAt(ctx, auth.From)
if err != nil {
return nil, err
}
auth.Nonce = big.NewInt(int64(nonce))
accs.accounts[auth.From] = auth
accs.accsCh <- auth
}
return accs, accs.checkAndSetBalances(ctx)
}
// getAccount takes an auth from the channel; it returns nil if no account is currently available.
func (a *accountPool) getAccount() *bind.TransactOpts {
select {
case auth := <-a.accsCh:
return auth
default:
return nil
}
}
// releaseAccount puts a used auth back into the channel.
func (a *accountPool) releaseAccount(auth *bind.TransactOpts) {
a.accsCh <- auth
}
// resetNonce resets the nonce if sending a signed tx failed.
func (a *accountPool) resetNonce(ctx context.Context, auth *bind.TransactOpts) {
nonce, err := a.client.PendingNonceAt(ctx, auth.From)
if err != nil {
log.Warn("failed to reset nonce", "address", auth.From.String(), "err", err)
return
}
auth.Nonce = big.NewInt(int64(nonce))
}
// checkAndSetBalances checks every account's balance and tops up, from the richest account, any account below the minimum.
func (a *accountPool) checkAndSetBalances(ctx context.Context) error {
var (
root *bind.TransactOpts
maxBls = big.NewInt(0)
lostAuths []*bind.TransactOpts
)
for addr, auth := range a.accounts {
bls, err := a.client.BalanceAt(ctx, addr, nil)
if err != nil || bls.Cmp(a.minBalance) < 0 {
if err != nil {
log.Warn("failed to get balance", "address", addr.String(), "err", err)
return err
}
lostAuths = append(lostAuths, auth)
continue
} else if bls.Cmp(maxBls) > 0 { // Find the biggest balance account.
root, maxBls = auth, bls
}
}
if root == nil {
return fmt.Errorf("no account has enough balance")
}
if len(lostAuths) == 0 {
return nil
}
var (
tx *types.Transaction
err error
)
for _, auth := range lostAuths {
tx, err = a.createSignedTx(root, &auth.From, a.minBalance)
if err != nil {
return err
}
err = a.client.SendTransaction(ctx, tx)
if err != nil {
log.Error("Failed to send balance to account", "err", err)
return err
}
log.Debug("send balance to account", "account", auth.From.String(), "balance", a.minBalance.String())
}
// wait until the last top-up tx is mined
if _, err = bind.WaitMined(ctx, a.client, tx); err != nil {
return err
}
// Reset root's nonce.
a.resetNonce(ctx, root)
return nil
}
func (a *accountPool) createSignedTx(from *bind.TransactOpts, to *common.Address, value *big.Int) (*types.Transaction, error) {
gasPrice, err := a.client.SuggestGasPrice(context.Background())
if err != nil {
return nil, err
}
gasPrice.Mul(gasPrice, big.NewInt(2))
// Get pending nonce
nonce, err := a.client.PendingNonceAt(context.Background(), from.From)
if err != nil {
return nil, err
}
tx := types.NewTx(&types.LegacyTx{
Nonce: nonce,
To: to,
Value: value,
Gas: 500000,
GasPrice: gasPrice,
})
signedTx, err := from.Signer(from.From, tx)
if err != nil {
return nil, err
}
return signedTx, nil
}
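
The pool's availability signal is simply the buffered `accsCh` channel acting as a semaphore over the keyed transactors. A self-contained toy (dummy addresses, no real constructor) of the behavior `getAccount` and `releaseAccount` give the sender:

    package main

    import (
        "fmt"

        "github.com/scroll-tech/go-ethereum/accounts/abi/bind"
        "github.com/scroll-tech/go-ethereum/common"
    )

    func main() {
        // Two "accounts" in a buffered channel, mirroring accountPool.accsCh.
        accsCh := make(chan *bind.TransactOpts, 2)
        accsCh <- &bind.TransactOpts{From: common.HexToAddress("0x01")}
        accsCh <- &bind.TransactOpts{From: common.HexToAddress("0x02")}

        // Non-blocking acquire, as in getAccount: nil means every account is
        // busy and the sender reports ErrNoAvailableAccount.
        acquire := func() *bind.TransactOpts {
            select {
            case auth := <-accsCh:
                return auth
            default:
                return nil
            }
        }

        a, b := acquire(), acquire()
        fmt.Println(a.From.Hex(), b.From.Hex(), acquire() == nil) // third acquire fails
        accsCh <- a                                               // releaseAccount puts it back
        fmt.Println(acquire().From.Hex())                         // succeeds again
    }
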

View File

@@ -1,95 +0,0 @@
package sender
import (
"sync"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
type senderMetrics struct {
senderCheckBalancerTotal *prometheus.CounterVec
senderCheckPendingTransactionTotal *prometheus.CounterVec
sendTransactionTotal *prometheus.CounterVec
sendTransactionFailureFullTx *prometheus.GaugeVec
sendTransactionFailureRepeatTransaction *prometheus.CounterVec
sendTransactionFailureGetFee *prometheus.CounterVec
sendTransactionFailureSendTx *prometheus.CounterVec
resubmitTransactionTotal *prometheus.CounterVec
currentPendingTxsNum *prometheus.GaugeVec
currentGasFeeCap *prometheus.GaugeVec
currentGasTipCap *prometheus.GaugeVec
currentGasPrice *prometheus.GaugeVec
currentGasLimit *prometheus.GaugeVec
currentNonce *prometheus.GaugeVec
}
var (
initSenderMetricOnce sync.Once
sm *senderMetrics
)
func initSenderMetrics(reg prometheus.Registerer) *senderMetrics {
initSenderMetricOnce.Do(func() {
sm = &senderMetrics{
sendTransactionTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "bridge_sender_send_transaction_total",
Help: "The total number of sending transaction.",
}, []string{"service", "name"}),
sendTransactionFailureFullTx: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
Name: "bridge_sender_send_transaction_full_tx_failure_total",
Help: "The total number of sending transaction failure for full size tx.",
}, []string{"service", "name"}),
sendTransactionFailureRepeatTransaction: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "bridge_sender_send_transaction_repeat_transaction_failure_total",
Help: "The total number of sending transaction failure for repeat transaction.",
}, []string{"service", "name"}),
sendTransactionFailureGetFee: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "bridge_sender_send_transaction_get_fee_failure_total",
Help: "The total number of sending transaction failure for getting fee.",
}, []string{"service", "name"}),
sendTransactionFailureSendTx: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "bridge_sender_send_transaction_send_tx_failure_total",
Help: "The total number of sending transaction failure for sending tx.",
}, []string{"service", "name"}),
resubmitTransactionTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "bridge_sender_send_transaction_resubmit_send_transaction_total",
Help: "The total number of resubmit transaction.",
}, []string{"service", "name"}),
currentPendingTxsNum: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
Name: "bridge_sender_pending_tx_count",
Help: "The pending tx count in the sender.",
}, []string{"service", "name"}),
currentGasFeeCap: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
Name: "bridge_sender_gas_fee_cap",
Help: "The gas fee of current transaction.",
}, []string{"service", "name"}),
currentGasTipCap: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
Name: "bridge_sender_gas_tip_cap",
Help: "The gas tip of current transaction.",
}, []string{"service", "name"}),
currentGasPrice: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
Name: "bridge_sender_gas_price_cap",
Help: "The gas price of current transaction.",
}, []string{"service", "name"}),
currentGasLimit: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
Name: "bridge_sender_gas_limit",
Help: "The gas limit of current transaction.",
}, []string{"service", "name"}),
currentNonce: promauto.With(reg).NewGaugeVec(prometheus.GaugeOpts{
Name: "bridge_sender_nonce",
Help: "The nonce of current transaction.",
}, []string{"service", "name"}),
senderCheckPendingTransactionTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "bridge_sender_check_pending_transaction_total",
Help: "The total number of check pending transaction.",
}, []string{"service", "name"}),
senderCheckBalancerTotal: promauto.With(reg).NewCounterVec(prometheus.CounterOpts{
Name: "bridge_sender_check_balancer_total",
Help: "The total number of check balancer.",
}, []string{"service", "name"}),
}
})
return sm
}

View File

@@ -11,7 +11,6 @@ import (
"time"
cmapV2 "github.com/orcaman/concurrent-map/v2"
"github.com/prometheus/client_golang/prometheus"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
@@ -40,6 +39,10 @@ var (
ErrFullPending = errors.New("sender's pending pool is full")
)
var (
defaultPendingLimit = 10
)
// Confirmation struct used to indicate transaction confirmation details
type Confirmation struct {
ID string
@@ -71,11 +74,9 @@ type Sender struct {
client *ethclient.Client // The client to retrieve on chain data or send transaction.
chainID *big.Int // The chain id of the endpoint
ctx context.Context
service string
name string
auth *bind.TransactOpts
minBalance *big.Int
// account fields.
auths *accountPool
blockNumber uint64 // Current block number on chain.
baseFeePerGas uint64 // Current base fee per gas on chain
@@ -83,40 +84,31 @@ type Sender struct {
confirmCh chan *Confirmation
stopCh chan struct{}
metrics *senderMetrics
}
// NewSender returns a new instance of transaction sender
// txConfirmationCh is used to notify confirmed transaction
func NewSender(ctx context.Context, config *config.SenderConfig, priv *ecdsa.PrivateKey, service, name string, reg prometheus.Registerer) (*Sender, error) {
func NewSender(ctx context.Context, config *config.SenderConfig, privs []*ecdsa.PrivateKey) (*Sender, error) {
client, err := ethclient.Dial(config.Endpoint)
if err != nil {
return nil, fmt.Errorf("failed to dial eth client, err: %w", err)
return nil, err
}
// get chainID from client
chainID, err := client.ChainID(ctx)
if err != nil {
return nil, fmt.Errorf("failed to get chain ID, err: %w", err)
return nil, err
}
auth, err := bind.NewKeyedTransactorWithChainID(priv, chainID)
auths, err := newAccountPool(ctx, config.MinBalance, client, privs)
if err != nil {
return nil, fmt.Errorf("failed to create transactor with chain ID %v, err: %w", chainID, err)
return nil, fmt.Errorf("failed to create account pool, err: %v", err)
}
// Set pending nonce
nonce, err := client.PendingNonceAt(ctx, auth.From)
if err != nil {
return nil, fmt.Errorf("failed to get pending nonce for address %s, err: %w", auth.From.Hex(), err)
}
auth.Nonce = big.NewInt(int64(nonce))
// get header by number
header, err := client.HeaderByNumber(ctx, nil)
if err != nil {
return nil, fmt.Errorf("failed to get header by number, err: %w", err)
return nil, err
}
var baseFeePerGas uint64
@@ -124,26 +116,27 @@ func NewSender(ctx context.Context, config *config.SenderConfig, priv *ecdsa.Pri
if header.BaseFee != nil {
baseFeePerGas = header.BaseFee.Uint64()
} else {
return nil, errors.New("dynamic fee tx type not supported: header.BaseFee is nil")
return nil, errors.New("DynamicFeeTxType not supported, header.BaseFee nil")
}
}
// initialize pending limit with a default value
if config.PendingLimit == 0 {
config.PendingLimit = defaultPendingLimit
}
sender := &Sender{
ctx: ctx,
config: config,
client: client,
chainID: chainID,
auth: auth,
minBalance: config.MinBalance,
auths: auths,
confirmCh: make(chan *Confirmation, 128),
blockNumber: header.Number.Uint64(),
baseFeePerGas: baseFeePerGas,
pendingTxs: cmapV2.New[*PendingTransaction](),
stopCh: make(chan struct{}),
name: name,
service: service,
}
sender.metrics = initSenderMetrics(reg)
go sender.loop(ctx)
@@ -182,6 +175,11 @@ func (s *Sender) SendConfirmation(cfm *Confirmation) {
s.confirmCh <- cfm
}
// NumberOfAccounts return the count of accounts.
func (s *Sender) NumberOfAccounts() int {
return len(s.auths.accounts)
}
func (s *Sender) getFeeData(auth *bind.TransactOpts, target *common.Address, value *big.Int, data []byte, minGasLimit uint64) (*FeeData, error) {
if s.config.TxType == DynamicFeeTxType {
return s.estimateDynamicGas(auth, target, value, data, minGasLimit)
@@ -190,54 +188,53 @@ func (s *Sender) getFeeData(auth *bind.TransactOpts, target *common.Address, val
}
// SendTransaction send a signed L2tL1 transaction.
func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.Int, data []byte, minGasLimit uint64) (common.Hash, error) {
s.metrics.sendTransactionTotal.WithLabelValues(s.service, s.name).Inc()
func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.Int, data []byte, minGasLimit uint64) (hash common.Hash, err error) {
if s.IsFull() {
s.metrics.sendTransactionFailureFullTx.WithLabelValues(s.service, s.name).Set(1)
return common.Hash{}, ErrFullPending
}
s.metrics.sendTransactionFailureFullTx.WithLabelValues(s.service, s.name).Set(0)
// Occupy the ID first, in case other threads call with the same ID at the same time
if ok := s.pendingTxs.SetIfAbsent(ID, nil); !ok {
s.metrics.sendTransactionFailureRepeatTransaction.WithLabelValues(s.service, s.name).Inc()
return common.Hash{}, fmt.Errorf("repeat transaction ID: %s", ID)
return common.Hash{}, fmt.Errorf("has the repeat tx ID, ID: %s", ID)
}
// get an available account from the pool
auth := s.auths.getAccount()
if auth == nil {
s.pendingTxs.Remove(ID) // release the ID on failure
return common.Hash{}, ErrNoAvailableAccount
}
var (
feeData *FeeData
tx *types.Transaction
err error
)
defer s.auths.releaseAccount(auth)
defer func() {
if err != nil {
s.pendingTxs.Remove(ID) // release the ID on failure
}
}()
if feeData, err = s.getFeeData(s.auth, target, value, data, minGasLimit); err != nil {
s.metrics.sendTransactionFailureGetFee.WithLabelValues(s.service, s.name).Inc()
return common.Hash{}, fmt.Errorf("failed to get fee data, err: %w", err)
var (
feeData *FeeData
tx *types.Transaction
)
// estimate gas fee
if feeData, err = s.getFeeData(auth, target, value, data, minGasLimit); err != nil {
return
}
if tx, err = s.createAndSendTx(auth, feeData, target, value, data, nil); err == nil {
// add pending transaction to queue
pending := &PendingTransaction{
tx: tx,
id: ID,
signer: auth,
submitAt: atomic.LoadUint64(&s.blockNumber),
feeData: feeData,
}
s.pendingTxs.Set(ID, pending)
return tx.Hash(), nil
}
if tx, err = s.createAndSendTx(s.auth, feeData, target, value, data, nil); err != nil {
s.metrics.sendTransactionFailureSendTx.WithLabelValues(s.service, s.name).Inc()
return common.Hash{}, fmt.Errorf("failed to create and send transaction, err: %w", err)
}
// add pending transaction
pending := &PendingTransaction{
tx: tx,
id: ID,
signer: s.auth,
submitAt: atomic.LoadUint64(&s.blockNumber),
feeData: feeData,
}
s.pendingTxs.Set(ID, pending)
return tx.Hash(), nil
return
}
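
One detail worth calling out in the rewritten SendTransaction is the ordering: the ID is reserved in the concurrent map before any account is taken, so concurrent submissions of the same logical transaction cannot each grab an account and send twice. A toy of the `SetIfAbsent` reservation (using a placeholder value type instead of *PendingTransaction):

    package main

    import (
        "fmt"

        cmapV2 "github.com/orcaman/concurrent-map/v2"
    )

    func main() {
        pendingTxs := cmapV2.New[*struct{}]()

        // The first caller reserves the ID; a concurrent caller with the same ID
        // loses the race and gets the repeat-ID error instead of sending twice.
        fmt.Println(pendingTxs.SetIfAbsent("batch-1-commit", nil)) // true
        fmt.Println(pendingTxs.SetIfAbsent("batch-1-commit", nil)) // false

        // On any failure the deferred cleanup removes the reservation again.
        pendingTxs.Remove("batch-1-commit")
        fmt.Println(pendingTxs.SetIfAbsent("batch-1-commit", nil)) // true
    }
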
func (s *Sender) createAndSendTx(auth *bind.TransactOpts, feeData *FeeData, target *common.Address, value *big.Int, data []byte, overrideNonce *uint64) (*types.Transaction, error) {
func (s *Sender) createAndSendTx(auth *bind.TransactOpts, feeData *FeeData, target *common.Address, value *big.Int, data []byte, overrideNonce *uint64) (tx *types.Transaction, err error) {
var (
nonce = auth.Nonce.Uint64()
txData types.TxData
@@ -295,50 +292,26 @@ func (s *Sender) createAndSendTx(auth *bind.TransactOpts, feeData *FeeData, targ
}
// sign and send
tx, err := auth.Signer(auth.From, types.NewTx(txData))
tx, err = auth.Signer(auth.From, types.NewTx(txData))
if err != nil {
log.Error("failed to sign tx", "err", err)
return nil, err
return
}
if err = s.client.SendTransaction(s.ctx, tx); err != nil {
log.Error("failed to send tx", "tx hash", tx.Hash().String(), "err", err)
// If the error message mentions the nonce, reset the nonce;
// only do so when the tx is not a resubmission
if strings.Contains(err.Error(), "nonce") && overrideNonce == nil {
s.resetNonce(context.Background())
s.auths.resetNonce(context.Background(), auth)
}
return nil, err
return
}
if feeData.gasTipCap != nil {
s.metrics.currentGasTipCap.WithLabelValues(s.service, s.name).Set(float64(feeData.gasTipCap.Uint64()))
}
if feeData.gasFeeCap != nil {
s.metrics.currentGasFeeCap.WithLabelValues(s.service, s.name).Set(float64(feeData.gasFeeCap.Uint64()))
}
if feeData.gasPrice != nil {
s.metrics.currentGasPrice.WithLabelValues(s.service, s.name).Set(float64(feeData.gasPrice.Uint64()))
}
s.metrics.currentGasLimit.WithLabelValues(s.service, s.name).Set(float64(feeData.gasLimit))
// update nonce when it is not from resubmit
if overrideNonce == nil {
auth.Nonce = big.NewInt(int64(nonce + 1))
}
return tx, nil
}
// reSetNonce reset nonce if send signed tx failed.
func (s *Sender) resetNonce(ctx context.Context) {
nonce, err := s.client.PendingNonceAt(ctx, s.auth.From)
if err != nil {
log.Warn("failed to reset nonce", "address", s.auth.From.String(), "err", err)
return
}
s.auth.Nonce = big.NewInt(int64(nonce))
return
}
func (s *Sender) resubmitTransaction(feeData *FeeData, auth *bind.TransactOpts, tx *types.Transaction) (*types.Transaction, error) {
@@ -346,15 +319,8 @@ func (s *Sender) resubmitTransaction(feeData *FeeData, auth *bind.TransactOpts,
escalateMultipleDen := new(big.Int).SetUint64(s.config.EscalateMultipleDen)
maxGasPrice := new(big.Int).SetUint64(s.config.MaxGasPrice)
txInfo := map[string]interface{}{
"tx_hash": tx.Hash().String(),
"tx_type": s.config.TxType,
"from": auth.From.String(),
}
switch s.config.TxType {
case LegacyTxType, AccessListTxType: // `LegacyTxType` is for the ganache mock node
originalGasPrice := feeData.gasPrice
gasPrice := escalateMultipleNum.Mul(escalateMultipleNum, big.NewInt(feeData.gasPrice.Int64()))
gasPrice = gasPrice.Div(gasPrice, escalateMultipleDen)
if gasPrice.Cmp(feeData.gasPrice) < 0 {
@@ -364,13 +330,7 @@ func (s *Sender) resubmitTransaction(feeData *FeeData, auth *bind.TransactOpts,
gasPrice = maxGasPrice
}
feeData.gasPrice = gasPrice
txInfo["original_gas_price"] = originalGasPrice
txInfo["adjusted_gas_price"] = gasPrice
default:
originalGasTipCap := big.NewInt(feeData.gasTipCap.Int64())
originalGasFeeCap := big.NewInt(feeData.gasFeeCap.Int64())
gasTipCap := big.NewInt(feeData.gasTipCap.Int64())
gasTipCap = gasTipCap.Mul(gasTipCap, escalateMultipleNum)
gasTipCap = gasTipCap.Div(gasTipCap, escalateMultipleDen)
@@ -402,17 +362,9 @@ func (s *Sender) resubmitTransaction(feeData *FeeData, auth *bind.TransactOpts,
}
feeData.gasFeeCap = gasFeeCap
feeData.gasTipCap = gasTipCap
txInfo["original_gas_tip_cap"] = originalGasTipCap
txInfo["adjusted_gas_tip_cap"] = gasTipCap
txInfo["original_gas_fee_cap"] = originalGasFeeCap
txInfo["adjusted_gas_fee_cap"] = gasFeeCap
}
log.Debug("Transaction gas adjustment details", txInfo)
nonce := tx.Nonce()
s.metrics.resubmitTransactionTotal.WithLabelValues(s.service, s.name).Inc()
return s.createAndSendTx(auth, feeData, tx.To(), tx.Value(), tx.Data(), &nonce)
}
@@ -449,12 +401,6 @@ func (s *Sender) checkPendingTransaction(header *types.Header, confirmed uint64)
}
}
} else if s.config.EscalateBlocks+pending.submitAt < number {
log.Debug("resubmit transaction",
"tx hash", pending.tx.Hash().String(),
"submit block number", pending.submitAt,
"current block number", number,
"escalateBlocks", s.config.EscalateBlocks)
var tx *types.Transaction
tx, err := s.resubmitTransaction(pending.feeData, pending.signer, pending.tx)
if err != nil {
@@ -502,22 +448,6 @@ func (s *Sender) checkPendingTransaction(header *types.Header, confirmed uint64)
}
}
// checkBalance checks the balance and returns an error if the balance is under a threshold.
func (s *Sender) checkBalance(ctx context.Context) error {
bls, err := s.client.BalanceAt(ctx, s.auth.From, nil)
if err != nil {
log.Warn("failed to get balance", "address", s.auth.From.String(), "err", err)
return err
}
if bls.Cmp(s.minBalance) < 0 {
return fmt.Errorf("insufficient account balance - actual balance: %s, minimum required balance: %s, address: %s",
bls.String(), s.minBalance.String(), s.auth.From.String())
}
return nil
}
// Loop is the main event loop
func (s *Sender) loop(ctx context.Context) {
checkTick := time.NewTicker(time.Duration(s.config.CheckPendingTime) * time.Second)
@@ -529,7 +459,6 @@ func (s *Sender) loop(ctx context.Context) {
for {
select {
case <-checkTick.C:
s.metrics.senderCheckPendingTransactionTotal.WithLabelValues(s.service, s.name).Inc()
header, err := s.client.HeaderByNumber(s.ctx, nil)
if err != nil {
log.Error("failed to get latest head", "err", err)
@@ -544,11 +473,8 @@ func (s *Sender) loop(ctx context.Context) {
s.checkPendingTransaction(header, confirmed)
case <-checkBalanceTicker.C:
s.metrics.senderCheckBalancerTotal.WithLabelValues(s.service, s.name).Inc()
// Check and set balance.
if err := s.checkBalance(ctx); err != nil {
log.Error("check balance error", "err", err)
}
_ = s.auths.checkAndSetBalances(ctx)
case <-ctx.Done():
return
case <-s.stopCh:


@@ -7,8 +7,10 @@ import (
"math/big"
"strconv"
"testing"
"time"
"github.com/agiledragon/gomonkey/v2"
cmap "github.com/orcaman/concurrent-map"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
@@ -16,6 +18,7 @@ import (
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/stretchr/testify/assert"
"golang.org/x/sync/errgroup"
"scroll-tech/common/docker"
@@ -25,10 +28,10 @@ import (
const TXBatch = 50
var (
privateKey *ecdsa.PrivateKey
cfg *config.Config
base *docker.App
txTypes = []string{"LegacyTx", "AccessListTx", "DynamicFeeTx"}
privateKeys []*ecdsa.PrivateKey
cfg *config.Config
base *docker.App
txTypes = []string{"LegacyTx", "AccessListTx", "DynamicFeeTx"}
)
func TestMain(m *testing.M) {
@@ -47,7 +50,7 @@ func setupEnv(t *testing.T) {
priv, err := crypto.HexToECDSA("1212121212121212121212121212121212121212121212121212121212121212")
assert.NoError(t, err)
// Load default private key.
privateKey = priv
privateKeys = []*ecdsa.PrivateKey{priv}
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = base.L1gethImg.Endpoint()
cfg.L1Config.RelayerConfig.SenderConfig.CheckBalanceTime = 1
@@ -59,10 +62,15 @@ func TestSender(t *testing.T) {
t.Run("test new sender", testNewSender)
t.Run("test pending limit", testPendLimit)
t.Run("test min gas limit", testMinGasLimit)
t.Run("test resubmit transaction", testResubmitTransaction)
t.Run("test resubmit transaction with rising base fee", testResubmitTransactionWithRisingBaseFee)
t.Run("test check pending transaction", testCheckPendingTransaction)
t.Run("test 1 account sender", func(t *testing.T) { testBatchSender(t, 1) })
t.Run("test 3 account sender", func(t *testing.T) { testBatchSender(t, 3) })
t.Run("test 8 account sender", func(t *testing.T) { testBatchSender(t, 8) })
}
func testNewSender(t *testing.T) {
@@ -70,7 +78,7 @@ func testNewSender(t *testing.T) {
// exit by Stop()
cfgCopy1 := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy1.TxType = txType
newSender1, err := NewSender(context.Background(), &cfgCopy1, privateKey, "test", "test", nil)
newSender1, err := NewSender(context.Background(), &cfgCopy1, privateKeys)
assert.NoError(t, err)
newSender1.Stop()
@@ -78,7 +86,7 @@ func testNewSender(t *testing.T) {
cfgCopy2 := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy2.TxType = txType
subCtx, cancel := context.WithCancel(context.Background())
_, err = NewSender(subCtx, &cfgCopy2, privateKey, "test", "test", nil)
_, err = NewSender(subCtx, &cfgCopy2, privateKeys)
assert.NoError(t, err)
cancel()
}
@@ -90,7 +98,7 @@ func testPendLimit(t *testing.T) {
cfgCopy.TxType = txType
cfgCopy.Confirmations = rpc.LatestBlockNumber
cfgCopy.PendingLimit = 2
newSender, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", nil)
newSender, err := NewSender(context.Background(), &cfgCopy, privateKeys)
assert.NoError(t, err)
for i := 0; i < 2*newSender.PendingLimit(); i++ {
@@ -107,7 +115,7 @@ func testMinGasLimit(t *testing.T) {
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
cfgCopy.Confirmations = rpc.LatestBlockNumber
newSender, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", nil)
newSender, err := NewSender(context.Background(), &cfgCopy, privateKeys)
assert.NoError(t, err)
client, err := ethclient.Dial(cfgCopy.Endpoint)
@@ -135,12 +143,13 @@ func testResubmitTransaction(t *testing.T) {
for _, txType := range txTypes {
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", nil)
s, err := NewSender(context.Background(), &cfgCopy, privateKeys)
assert.NoError(t, err)
tx := types.NewTransaction(s.auth.Nonce.Uint64(), common.Address{}, big.NewInt(0), 0, big.NewInt(0), nil)
feeData, err := s.getFeeData(s.auth, &common.Address{}, big.NewInt(0), nil, 0)
auth := s.auths.getAccount()
tx := types.NewTransaction(auth.Nonce.Uint64(), common.Address{}, big.NewInt(0), 0, big.NewInt(0), nil)
feeData, err := s.getFeeData(auth, &common.Address{}, big.NewInt(0), nil, 0)
assert.NoError(t, err)
_, err = s.resubmitTransaction(feeData, s.auth, tx)
_, err = s.resubmitTransaction(feeData, auth, tx)
assert.NoError(t, err)
s.Stop()
}
@@ -151,16 +160,17 @@ func testResubmitTransactionWithRisingBaseFee(t *testing.T) {
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", nil)
s, err := NewSender(context.Background(), &cfgCopy, privateKeys)
assert.NoError(t, err)
tx := types.NewTransaction(s.auth.Nonce.Uint64(), common.Address{}, big.NewInt(0), 0, big.NewInt(0), nil)
auth := s.auths.getAccount()
tx := types.NewTransaction(auth.Nonce.Uint64(), common.Address{}, big.NewInt(0), 0, big.NewInt(0), nil)
s.baseFeePerGas = 1000
feeData, err := s.getFeeData(s.auth, &common.Address{}, big.NewInt(0), nil, 0)
feeData, err := s.getFeeData(auth, &common.Address{}, big.NewInt(0), nil, 0)
assert.NoError(t, err)
// bump the basefee by 10x
s.baseFeePerGas *= 10
// resubmit and check that the gas fee has been adjusted accordingly
newTx, err := s.resubmitTransaction(feeData, s.auth, tx)
newTx, err := s.resubmitTransaction(feeData, auth, tx)
assert.NoError(t, err)
escalateMultipleNum := new(big.Int).SetUint64(s.config.EscalateMultipleNum)
@@ -186,13 +196,14 @@ func testCheckPendingTransaction(t *testing.T) {
for _, txType := range txTypes {
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.TxType = txType
s, err := NewSender(context.Background(), &cfgCopy, privateKey, "test", "test", nil)
s, err := NewSender(context.Background(), &cfgCopy, privateKeys)
assert.NoError(t, err)
header := &types.Header{Number: big.NewInt(100), BaseFee: big.NewInt(100)}
confirmed := uint64(100)
receipt := &types.Receipt{Status: types.ReceiptStatusSuccessful, BlockNumber: big.NewInt(90)}
tx := types.NewTransaction(s.auth.Nonce.Uint64(), common.Address{}, big.NewInt(0), 0, big.NewInt(0), nil)
auth := s.auths.getAccount()
tx := types.NewTransaction(auth.Nonce.Uint64(), common.Address{}, big.NewInt(0), 0, big.NewInt(0), nil)
testCases := []struct {
name string
@@ -269,3 +280,66 @@ func testCheckPendingTransaction(t *testing.T) {
s.Stop()
}
}
func testBatchSender(t *testing.T, batchSize int) {
for _, txType := range txTypes {
for len(privateKeys) < batchSize {
priv, err := crypto.GenerateKey()
assert.NoError(t, err)
privateKeys = append(privateKeys, priv)
}
cfgCopy := *cfg.L1Config.RelayerConfig.SenderConfig
cfgCopy.Confirmations = rpc.LatestBlockNumber
cfgCopy.PendingLimit = batchSize * TXBatch
cfgCopy.TxType = txType
newSender, err := NewSender(context.Background(), &cfgCopy, privateKeys)
assert.NoError(t, err)
// send transactions
var (
eg errgroup.Group
idCache = cmap.New()
confirmCh = newSender.ConfirmChan()
)
for idx := 0; idx < newSender.NumberOfAccounts(); idx++ {
index := idx
eg.Go(func() error {
for i := 0; i < TXBatch; i++ {
toAddr := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
id := strconv.Itoa(i + index*1000)
_, err := newSender.SendTransaction(id, &toAddr, big.NewInt(1), nil, 0)
if errors.Is(err, ErrNoAvailableAccount) || errors.Is(err, ErrFullPending) {
<-time.After(time.Second)
continue
}
assert.NoError(t, err)
idCache.Set(id, struct{}{})
}
return nil
})
}
assert.NoError(t, eg.Wait())
t.Logf("successful send batch txs, batch size: %d, total count: %d", newSender.NumberOfAccounts(), TXBatch*newSender.NumberOfAccounts())
// avoid the default 10-minute test timeout causing the testcase to panic
after := time.After(80 * time.Second)
isDone := false
for !isDone {
select {
case cmsg := <-confirmCh:
assert.Equal(t, true, cmsg.IsSuccessful)
_, exist := idCache.Pop(cmsg.ID)
assert.Equal(t, true, exist)
// Receive all confirmed txs.
if idCache.Count() == 0 {
isDone = true
}
case <-after:
t.Error("newSender test failed because of timeout")
isDone = true
}
}
newSender.Stop()
}
}
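The loop above already shows the intended usage pattern: when every backing account is busy the sender returns ErrNoAvailableAccount, when the pending queue is full it returns ErrFullPending, and the caller simply backs off and retries. Condensed into an illustrative helper (not part of the sender package, and assuming the same imports and types used by this test file):

// sendWithRetry keeps retrying while the sender is temporarily saturated.
func sendWithRetry(s *Sender, id string, to common.Address, value *big.Int) error {
	for {
		_, err := s.SendTransaction(id, &to, value, nil, 0)
		if errors.Is(err, ErrNoAvailableAccount) || errors.Is(err, ErrFullPending) {
			<-time.After(time.Second) // back off and try again
			continue
		}
		return err // nil on success, or a non-retryable failure
	}
}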


@@ -5,8 +5,6 @@ import (
"fmt"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
@@ -28,28 +26,13 @@ type BatchProposer struct {
maxChunkNumPerBatch uint64
maxL1CommitGasPerBatch uint64
maxL1CommitCalldataSizePerBatch uint32
minChunkNumPerBatch uint64
batchTimeoutSec uint64
gasCostIncreaseMultiplier float64
batchProposerCircleTotal prometheus.Counter
proposeBatchFailureTotal prometheus.Counter
proposeBatchUpdateInfoTotal prometheus.Counter
proposeBatchUpdateInfoFailureTotal prometheus.Counter
totalL1CommitGas prometheus.Gauge
totalL1CommitCalldataSize prometheus.Gauge
batchChunksNum prometheus.Gauge
batchFirstBlockTimeoutReached prometheus.Counter
batchChunksProposeNotEnoughTotal prometheus.Counter
}
// NewBatchProposer creates a new BatchProposer instance.
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, db *gorm.DB, reg prometheus.Registerer) *BatchProposer {
log.Debug("new batch proposer",
"maxChunkNumPerBatch", cfg.MaxChunkNumPerBatch,
"maxL1CommitGasPerBatch", cfg.MaxL1CommitGasPerBatch,
"maxL1CommitCalldataSizePerBatch", cfg.MaxL1CommitCalldataSizePerBatch,
"batchTimeoutSec", cfg.BatchTimeoutSec)
func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, db *gorm.DB) *BatchProposer {
return &BatchProposer{
ctx: ctx,
db: db,
@@ -59,65 +42,25 @@ func NewBatchProposer(ctx context.Context, cfg *config.BatchProposerConfig, db *
maxChunkNumPerBatch: cfg.MaxChunkNumPerBatch,
maxL1CommitGasPerBatch: cfg.MaxL1CommitGasPerBatch,
maxL1CommitCalldataSizePerBatch: cfg.MaxL1CommitCalldataSizePerBatch,
minChunkNumPerBatch: cfg.MinChunkNumPerBatch,
batchTimeoutSec: cfg.BatchTimeoutSec,
gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
batchProposerCircleTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_batch_circle_total",
Help: "Total number of propose batch total.",
}),
proposeBatchFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_batch_failure_circle_total",
Help: "Total number of propose batch total.",
}),
proposeBatchUpdateInfoTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_batch_update_info_total",
Help: "Total number of propose batch update info total.",
}),
proposeBatchUpdateInfoFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_batch_update_info_failure_total",
Help: "Total number of propose batch update info failure total.",
}),
totalL1CommitGas: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_propose_batch_total_l1_commit_gas",
Help: "The total l1 commit gas",
}),
totalL1CommitCalldataSize: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_propose_batch_total_l1_call_data_size",
Help: "The total l1 call data size",
}),
batchChunksNum: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_propose_batch_chunks_number",
Help: "The number of chunks in the batch",
}),
batchFirstBlockTimeoutReached: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_batch_first_block_timeout_reached_total",
Help: "Total times of batch's first block timeout reached",
}),
batchChunksProposeNotEnoughTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_batch_chunks_propose_not_enough_total",
Help: "Total number of batch chunk propose not enough",
}),
}
}
// TryProposeBatch tries to propose a new batch.
func (p *BatchProposer) TryProposeBatch() {
p.batchProposerCircleTotal.Inc()
dbChunks, err := p.proposeBatchChunks()
if err != nil {
p.proposeBatchFailureTotal.Inc()
log.Error("proposeBatchChunks failed", "err", err)
return
}
if err := p.updateBatchInfoInDB(dbChunks); err != nil {
p.proposeBatchUpdateInfoFailureTotal.Inc()
log.Error("update batch info in db failed", "err", err)
}
}
func (p *BatchProposer) updateBatchInfoInDB(dbChunks []*orm.Chunk) error {
p.proposeBatchUpdateInfoTotal.Inc()
numChunks := len(dbChunks)
if numChunks <= 0 {
return nil
@@ -134,12 +77,10 @@ func (p *BatchProposer) updateBatchInfoInDB(dbChunks []*orm.Chunk) error {
err = p.db.Transaction(func(dbTX *gorm.DB) error {
batch, dbErr := p.batchOrm.InsertBatch(p.ctx, startChunkIndex, endChunkIndex, startChunkHash, endChunkHash, chunks, dbTX)
if dbErr != nil {
log.Warn("BatchProposer.updateBatchInfoInDB insert batch failure", "error", "start chunk index", startChunkIndex, "end chunk index", endChunkIndex, dbErr)
return dbErr
}
dbErr = p.chunkOrm.UpdateBatchHashInRange(p.ctx, startChunkIndex, endChunkIndex, batch.Hash, dbTX)
if dbErr != nil {
log.Warn("BatchProposer.UpdateBatchHashInRange update the chunk's batch hash failure", "hash", batch.Hash, "error", dbErr)
return dbErr
}
return nil
@@ -157,86 +98,83 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
return nil, nil
}
var totalL1CommitCalldataSize uint32
var totalL1CommitGas uint64
var totalChunks uint64
var totalL1MessagePopped uint64
firstChunk := dbChunks[0]
totalL1CommitCalldataSize := firstChunk.TotalL1CommitCalldataSize
totalL1CommitGas := firstChunk.TotalL1CommitGas
totalChunks := uint64(1)
totalL1MessagePopped := firstChunk.TotalL1MessagesPoppedBefore + uint64(firstChunk.TotalL1MessagesPoppedInChunk)
parentBatch, err := p.batchOrm.GetLatestBatch(p.ctx)
if err != nil {
return nil, err
}
getKeccakGas := func(size uint64) uint64 {
return 30 + 6*((size+31)/32) // 30 + 6 * ceil(size / 32)
}
// Add extra gas costs
totalL1CommitGas += 4 * 2100 // 4 one-time cold sload for commitBatch
totalL1CommitGas += 20000 // 1 time sstore
totalL1CommitGas += types.CalldataNonZeroByteGas // version in calldata
totalL1CommitGas += 4 * 2100 // 4 one-time cold sload for commitBatch
totalL1CommitGas += 20000 // 1 time sstore
totalL1CommitGas += 16 // version in calldata
totalL1CommitGas += 16 * (32 * (totalL1MessagePopped + 255) / 256) // _skippedL1MessageBitmap in calldata
// adjusting gas:
// add 1 time cold sload (2100 gas) for L1MessageQueue
// add 1 time cold address access (2600 gas) for L1MessageQueue
// minus 1 time warm sload (100 gas) & 1 time warm address access (100 gas)
totalL1CommitGas += (2100 + 2600 - 100 - 100)
totalL1CommitGas += getKeccakGas(32 * totalChunks) // batch data hash
if parentBatch != nil {
totalL1CommitGas += types.GetKeccak256Gas(uint64(len(parentBatch.BatchHeader))) // parent batch header hash
totalL1CommitGas += types.CalldataNonZeroByteGas * uint64(len(parentBatch.BatchHeader)) // parent batch header in calldata
totalL1CommitGas += getKeccakGas(uint64(len(parentBatch.BatchHeader))) // parent batch header hash
totalL1CommitGas += 16 * uint64(len(parentBatch.BatchHeader)) // parent batch header in calldata
}
// batch header size: 89 + 32 * ceil(l1MessagePopped / 256)
totalL1CommitGas += getKeccakGas(89 + 32*(totalL1MessagePopped+255)/256)
// Check if the first chunk breaks hard limits.
// If so, it indicates there are bugs in chunk-proposer, manual fix is needed.
if p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerBatch) {
return nil, fmt.Errorf(
"the first chunk exceeds l1 commit gas limit; start block number: %v, end block number: %v, commit gas: %v, max commit gas limit: %v",
firstChunk.StartBlockNumber,
firstChunk.EndBlockNumber,
totalL1CommitGas,
p.maxL1CommitGasPerBatch,
)
}
for i, chunk := range dbChunks {
if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch {
return nil, fmt.Errorf(
"the first chunk exceeds l1 commit calldata size limit; start block number: %v, end block number %v, calldata size: %v, max calldata size limit: %v",
firstChunk.StartBlockNumber,
firstChunk.EndBlockNumber,
totalL1CommitCalldataSize,
p.maxL1CommitCalldataSizePerBatch,
)
}
for i, chunk := range dbChunks[1:] {
totalL1CommitCalldataSize += chunk.TotalL1CommitCalldataSize
totalL1CommitGas += chunk.TotalL1CommitGas
// adjust batch data hash gas cost
totalL1CommitGas -= types.GetKeccak256Gas(32 * totalChunks)
totalL1CommitGas -= getKeccakGas(32 * totalChunks)
totalChunks++
totalL1CommitGas += types.GetKeccak256Gas(32 * totalChunks)
// adjust batch header hash gas cost, batch header size: 89 + 32 * ceil(l1MessagePopped / 256)
totalL1CommitGas -= types.GetKeccak256Gas(89 + 32*(totalL1MessagePopped+255)/256)
totalL1CommitGas -= types.CalldataNonZeroByteGas * (32 * (totalL1MessagePopped + 255) / 256)
totalL1CommitGas += getKeccakGas(32 * totalChunks)
// adjust batch header hash gas cost
totalL1CommitGas -= getKeccakGas(89 + 32*(totalL1MessagePopped+255)/256)
totalL1CommitGas -= 16 * (32 * (totalL1MessagePopped + 255) / 256)
totalL1MessagePopped += uint64(chunk.TotalL1MessagesPoppedInChunk)
totalL1CommitGas += types.CalldataNonZeroByteGas * (32 * (totalL1MessagePopped + 255) / 256)
totalL1CommitGas += types.GetKeccak256Gas(89 + 32*(totalL1MessagePopped+255)/256)
totalOverEstimateL1CommitGas := uint64(p.gasCostIncreaseMultiplier * float64(totalL1CommitGas))
totalL1CommitGas += 16 * (32 * (totalL1MessagePopped + 255) / 256)
totalL1CommitGas += getKeccakGas(89 + 32*(totalL1MessagePopped+255)/256)
if totalChunks > p.maxChunkNumPerBatch ||
totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch ||
totalOverEstimateL1CommitGas > p.maxL1CommitGasPerBatch {
// Check if the first chunk breaks hard limits.
// If so, it indicates there are bugs in chunk-proposer, manual fix is needed.
if i == 0 {
if totalOverEstimateL1CommitGas > p.maxL1CommitGasPerBatch {
return nil, fmt.Errorf(
"the first chunk exceeds l1 commit gas limit; start block number: %v, end block number: %v, commit gas: %v, max commit gas limit: %v",
dbChunks[0].StartBlockNumber,
dbChunks[0].EndBlockNumber,
totalL1CommitGas,
p.maxL1CommitGasPerBatch,
)
}
if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerBatch {
return nil, fmt.Errorf(
"the first chunk exceeds l1 commit calldata size limit; start block number: %v, end block number %v, calldata size: %v, max calldata size limit: %v",
dbChunks[0].StartBlockNumber,
dbChunks[0].EndBlockNumber,
totalL1CommitCalldataSize,
p.maxL1CommitCalldataSizePerBatch,
)
}
}
log.Debug("breaking limit condition in batching",
"currentTotalChunks", totalChunks,
"maxChunkNumPerBatch", p.maxChunkNumPerBatch,
"currentL1CommitCalldataSize", totalL1CommitCalldataSize,
"maxL1CommitCalldataSizePerBatch", p.maxL1CommitCalldataSizePerBatch,
"currentOverEstimateL1CommitGas", totalOverEstimateL1CommitGas,
"maxL1CommitGasPerBatch", p.maxL1CommitGasPerBatch)
p.totalL1CommitGas.Set(float64(totalL1CommitGas))
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
p.batchChunksNum.Set(float64(len(dbChunks)))
return dbChunks[:i], nil
p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerBatch) {
return dbChunks[:i+1], nil
}
}
var hasChunkTimeout bool
currentTimeSec := uint64(time.Now().Unix())
if dbChunks[0].StartBlockTime+p.batchTimeoutSec < currentTimeSec {
log.Warn("first block timeout",
@@ -244,16 +182,16 @@ func (p *BatchProposer) proposeBatchChunks() ([]*orm.Chunk, error) {
"first block timestamp", dbChunks[0].StartBlockTime,
"chunk outdated time threshold", currentTimeSec,
)
p.batchFirstBlockTimeoutReached.Inc()
p.totalL1CommitGas.Set(float64(totalL1CommitGas))
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
p.batchChunksNum.Set(float64(len(dbChunks)))
return dbChunks, nil
hasChunkTimeout = true
}
log.Debug("pending chunks do not reach one of the constraints or contain a timeout block")
p.batchChunksProposeNotEnoughTotal.Inc()
return nil, nil
if !hasChunkTimeout && uint64(len(dbChunks)) < p.minChunkNumPerBatch {
log.Warn("The chunk number of the batch is less than the minimum limit",
"chunk num", len(dbChunks), "minChunkNumPerBatch", p.minChunkNumPerBatch,
)
return nil, nil
}
return dbChunks, nil
}
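The gas bookkeeping above is mostly fixed-cost arithmetic: getKeccakGas(size) is 30 + 6*ceil(size/32), calldata is charged at 16 gas per byte, the skipped-message bitmap takes one 32-byte word per 256 popped L1 messages, and the batch header is 89 bytes plus that bitmap. A small, runnable worked example (the helper names are illustrative only):

package main

import "fmt"

// keccakGas mirrors getKeccakGas above: 30 + 6 * ceil(size/32).
func keccakGas(size uint64) uint64 { return 30 + 6*((size+31)/32) }

// bitmapBytes is the _skippedL1MessageBitmap size: one 32-byte word per 256 popped L1 messages.
func bitmapBytes(popped uint64) uint64 { return 32 * ((popped + 255) / 256) }

func main() {
	popped := uint64(300) // say 300 L1 messages have been popped

	headerSize := 89 + bitmapBytes(popped) // 89 + 64 = 153 bytes
	fmt.Println(keccakGas(headerSize))     // batch header hash: 30 + 6*ceil(153/32) = 60 gas
	fmt.Println(16 * bitmapBytes(popped))  // bitmap calldata: 16 * 64 = 1024 gas

	// Appending one more chunk swaps the old batch-data-hash keccak term for a new one.
	totalChunks := uint64(3)
	fmt.Println(keccakGas(32*(totalChunks+1)) - keccakGas(32*totalChunks)) // 54 - 48 = 6 gas
}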
func (p *BatchProposer) dbChunksToBridgeChunks(dbChunks []*orm.Chunk) ([]*types.Chunk, error) {


@@ -23,20 +23,23 @@ func testBatchProposer(t *testing.T) {
assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxTxGasPerChunk: 1000000000,
MaxL2TxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MinL1CommitCalldataSizePerChunk: 0,
MaxRowConsumptionPerChunk: 1048319,
ChunkTimeoutSec: 300,
}, db, nil)
}, db)
cp.TryProposeChunk()
bp := NewBatchProposer(context.Background(), &config.BatchProposerConfig{
MaxChunkNumPerBatch: 10,
MaxL1CommitGasPerBatch: 50000000000,
MaxL1CommitCalldataSizePerBatch: 1000000,
MinChunkNumPerBatch: 1,
BatchTimeoutSec: 300,
}, db, nil)
}, db)
bp.TryProposeBatch()
chunkOrm := orm.NewChunk(db)


@@ -6,8 +6,6 @@ import (
"fmt"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/log"
"gorm.io/gorm"
@@ -51,111 +49,43 @@ type ChunkProposer struct {
chunkOrm *orm.Chunk
l2BlockOrm *orm.L2Block
maxTxGasPerChunk uint64
maxL2TxNumPerChunk uint64
maxL1CommitGasPerChunk uint64
maxL1CommitCalldataSizePerChunk uint64
minL1CommitCalldataSizePerChunk uint64
maxRowConsumptionPerChunk uint64
chunkTimeoutSec uint64
gasCostIncreaseMultiplier float64
chunkProposerCircleTotal prometheus.Counter
proposeChunkFailureTotal prometheus.Counter
proposeChunkUpdateInfoTotal prometheus.Counter
proposeChunkUpdateInfoFailureTotal prometheus.Counter
chunkL2TxNum prometheus.Gauge
chunkEstimateL1CommitGas prometheus.Gauge
totalL1CommitCalldataSize prometheus.Gauge
totalTxGasUsed prometheus.Gauge
maxTxConsumption prometheus.Gauge
chunkBlocksNum prometheus.Gauge
chunkFirstBlockTimeoutReached prometheus.Counter
chunkBlocksProposeNotEnoughTotal prometheus.Counter
}
// NewChunkProposer creates a new ChunkProposer instance.
func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, db *gorm.DB, reg prometheus.Registerer) *ChunkProposer {
log.Debug("new chunk proposer",
"maxL2TxNumPerChunk", cfg.MaxL2TxNumPerChunk,
"maxL1CommitGasPerChunk", cfg.MaxL1CommitGasPerChunk,
"maxL1CommitCalldataSizePerChunk", cfg.MaxL1CommitCalldataSizePerChunk,
"maxRowConsumptionPerChunk", cfg.MaxRowConsumptionPerChunk,
"chunkTimeoutSec", cfg.ChunkTimeoutSec)
func NewChunkProposer(ctx context.Context, cfg *config.ChunkProposerConfig, db *gorm.DB) *ChunkProposer {
return &ChunkProposer{
ctx: ctx,
db: db,
chunkOrm: orm.NewChunk(db),
l2BlockOrm: orm.NewL2Block(db),
maxTxGasPerChunk: cfg.MaxTxGasPerChunk,
maxL2TxNumPerChunk: cfg.MaxL2TxNumPerChunk,
maxL1CommitGasPerChunk: cfg.MaxL1CommitGasPerChunk,
maxL1CommitCalldataSizePerChunk: cfg.MaxL1CommitCalldataSizePerChunk,
minL1CommitCalldataSizePerChunk: cfg.MinL1CommitCalldataSizePerChunk,
maxRowConsumptionPerChunk: cfg.MaxRowConsumptionPerChunk,
chunkTimeoutSec: cfg.ChunkTimeoutSec,
gasCostIncreaseMultiplier: cfg.GasCostIncreaseMultiplier,
chunkProposerCircleTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_chunk_circle_total",
Help: "Total number of propose chunk total.",
}),
proposeChunkFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_chunk_failure_circle_total",
Help: "Total number of propose chunk failure total.",
}),
proposeChunkUpdateInfoTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_chunk_update_info_total",
Help: "Total number of propose chunk update info total.",
}),
proposeChunkUpdateInfoFailureTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_chunk_update_info_failure_total",
Help: "Total number of propose chunk update info failure total.",
}),
chunkL2TxNum: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_propose_chunk_l2_tx_num",
Help: "The chunk l2 tx num",
}),
chunkEstimateL1CommitGas: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_propose_chunk_estimate_l1_commit_gas",
Help: "The chunk estimate l1 commit gas",
}),
totalL1CommitCalldataSize: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_propose_chunk_total_l1_commit_call_data_size",
Help: "The total l1 commit call data size",
}),
totalTxGasUsed: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_propose_chunk_total_tx_gas_used",
Help: "The total tx gas used",
}),
maxTxConsumption: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_propose_chunk_max_tx_consumption",
Help: "The max tx consumption",
}),
chunkBlocksNum: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_propose_chunk_chunk_block_number",
Help: "The number of blocks in the chunk",
}),
chunkFirstBlockTimeoutReached: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_chunk_first_block_timeout_reached_total",
Help: "Total times of chunk's first block timeout reached",
}),
chunkBlocksProposeNotEnoughTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_propose_chunk_blocks_propose_not_enough_total",
Help: "Total number of chunk block propose not enough",
}),
}
}
// TryProposeChunk tries to propose a new chunk.
func (p *ChunkProposer) TryProposeChunk() {
p.chunkProposerCircleTotal.Inc()
proposedChunk, err := p.proposeChunk()
if err != nil {
p.proposeChunkFailureTotal.Inc()
log.Error("propose new chunk failed", "err", err)
return
}
if err := p.updateChunkInfoInDB(proposedChunk); err != nil {
p.proposeChunkUpdateInfoFailureTotal.Inc()
log.Error("update chunk info in orm failed", "err", err)
}
}
@@ -165,11 +95,9 @@ func (p *ChunkProposer) updateChunkInfoInDB(chunk *types.Chunk) error {
return nil
}
p.proposeChunkUpdateInfoTotal.Inc()
err := p.db.Transaction(func(dbTX *gorm.DB) error {
dbChunk, err := p.chunkOrm.InsertChunk(p.ctx, chunk, dbTX)
if err != nil {
log.Warn("ChunkProposer.InsertChunk failed", "chunk hash", chunk.Hash)
return err
}
if err := p.l2BlockOrm.UpdateChunkHashInRange(p.ctx, dbChunk.StartBlockNumber, dbChunk.EndBlockNumber, dbChunk.Hash, dbTX); err != nil {
@@ -191,91 +119,88 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
return nil, nil
}
var chunk types.Chunk
var totalTxGasUsed uint64
var totalL2TxNum uint64
var totalL1CommitCalldataSize uint64
var totalL1CommitGas uint64
chunk := &types.Chunk{Blocks: blocks[:1]}
firstBlock := chunk.Blocks[0]
totalTxGasUsed := firstBlock.Header.GasUsed
totalL2TxNum := firstBlock.L2TxsNum()
totalL1CommitCalldataSize := firstBlock.EstimateL1CommitCalldataSize()
crc := chunkRowConsumption{}
totalL1CommitGas := chunk.EstimateL1CommitGas()
for i, block := range blocks {
if err := crc.add(firstBlock.RowConsumption); err != nil {
return nil, fmt.Errorf("chunk-proposer failed to update chunk row consumption: %v", err)
}
// Check if the first block breaks hard limits.
// If so, it indicates there are bugs in sequencer, manual fix is needed.
if totalL2TxNum > p.maxL2TxNumPerChunk {
return nil, fmt.Errorf(
"the first block exceeds l2 tx number limit; block number: %v, number of transactions: %v, max transaction number limit: %v",
firstBlock.Header.Number,
totalL2TxNum,
p.maxL2TxNumPerChunk,
)
}
if p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerChunk) {
return nil, fmt.Errorf(
"the first block exceeds l1 commit gas limit; block number: %v, commit gas: %v, max commit gas limit: %v",
firstBlock.Header.Number,
totalL1CommitGas,
p.maxL1CommitGasPerChunk,
)
}
if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk {
return nil, fmt.Errorf(
"the first block exceeds l1 commit calldata size limit; block number: %v, calldata size: %v, max calldata size limit: %v",
firstBlock.Header.Number,
totalL1CommitCalldataSize,
p.maxL1CommitCalldataSizePerChunk,
)
}
// Check if the first block breaks any soft limits.
if totalTxGasUsed > p.maxTxGasPerChunk {
log.Warn(
"The first block in chunk exceeds l2 tx gas limit",
"block number", firstBlock.Header.Number,
"gas used", totalTxGasUsed,
"max gas limit", p.maxTxGasPerChunk,
)
}
if max := crc.max(); max > p.maxRowConsumptionPerChunk {
return nil, fmt.Errorf(
"the first block exceeds row consumption limit; block number: %v, row consumption: %v, max: %v, limit: %v",
firstBlock.Header.Number,
crc,
max,
p.maxRowConsumptionPerChunk,
)
}
for _, block := range blocks[1:] {
chunk.Blocks = append(chunk.Blocks, block)
totalTxGasUsed += block.Header.GasUsed
totalL2TxNum += block.L2TxsNum()
totalL1CommitCalldataSize += block.EstimateL1CommitCalldataSize()
totalL1CommitGas = chunk.EstimateL1CommitGas()
totalOverEstimateL1CommitGas := uint64(p.gasCostIncreaseMultiplier * float64(totalL1CommitGas))
if err := crc.add(block.RowConsumption); err != nil {
return nil, fmt.Errorf("chunk-proposer failed to update chunk row consumption: %v", err)
}
crcMax := crc.max()
if totalL2TxNum > p.maxL2TxNumPerChunk ||
if totalTxGasUsed > p.maxTxGasPerChunk ||
totalL2TxNum > p.maxL2TxNumPerChunk ||
totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk ||
totalOverEstimateL1CommitGas > p.maxL1CommitGasPerChunk ||
crcMax > p.maxRowConsumptionPerChunk {
// Check if the first block breaks hard limits.
// If so, it indicates there are bugs in sequencer, manual fix is needed.
if i == 0 {
if totalL2TxNum > p.maxL2TxNumPerChunk {
return nil, fmt.Errorf(
"the first block exceeds l2 tx number limit; block number: %v, number of transactions: %v, max transaction number limit: %v",
block.Header.Number,
totalL2TxNum,
p.maxL2TxNumPerChunk,
)
}
if totalOverEstimateL1CommitGas > p.maxL1CommitGasPerChunk {
return nil, fmt.Errorf(
"the first block exceeds l1 commit gas limit; block number: %v, commit gas: %v, max commit gas limit: %v",
block.Header.Number,
totalL1CommitGas,
p.maxL1CommitGasPerChunk,
)
}
if totalL1CommitCalldataSize > p.maxL1CommitCalldataSizePerChunk {
return nil, fmt.Errorf(
"the first block exceeds l1 commit calldata size limit; block number: %v, calldata size: %v, max calldata size limit: %v",
block.Header.Number,
totalL1CommitCalldataSize,
p.maxL1CommitCalldataSizePerChunk,
)
}
if crcMax > p.maxRowConsumptionPerChunk {
return nil, fmt.Errorf(
"the first block exceeds row consumption limit; block number: %v, row consumption: %v, max: %v, limit: %v",
block.Header.Number,
crc,
crcMax,
p.maxRowConsumptionPerChunk,
)
}
}
log.Debug("breaking limit condition in chunking",
"totalL2TxNum", totalL2TxNum,
"maxL2TxNumPerChunk", p.maxL2TxNumPerChunk,
"currentL1CommitCalldataSize", totalL1CommitCalldataSize,
"maxL1CommitGasPerChunk", p.maxL1CommitGasPerChunk,
"currentOverEstimateL1CommitGas", totalOverEstimateL1CommitGas,
"maxL1CommitCalldataSizePerChunk", p.maxL1CommitCalldataSizePerChunk,
"chunkRowConsumptionMax", crcMax,
"chunkRowConsumption", crc,
"p.maxRowConsumptionPerChunk", p.maxRowConsumptionPerChunk)
p.chunkL2TxNum.Set(float64(totalL2TxNum))
p.chunkEstimateL1CommitGas.Set(float64(totalL1CommitGas))
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
p.maxTxConsumption.Set(float64(crcMax))
p.totalTxGasUsed.Set(float64(totalTxGasUsed))
p.chunkBlocksNum.Set(float64(len(chunk.Blocks)))
return &chunk, nil
p.gasCostIncreaseMultiplier*float64(totalL1CommitGas) > float64(p.maxL1CommitGasPerChunk) ||
crc.max() > p.maxRowConsumptionPerChunk {
chunk.Blocks = chunk.Blocks[:len(chunk.Blocks)-1] // remove the last block from chunk
break
}
chunk.Blocks = append(chunk.Blocks, block)
}
var hasBlockTimeout bool
currentTimeSec := uint64(time.Now().Unix())
if blocks[0].Header.Time+p.chunkTimeoutSec < currentTimeSec {
log.Warn("first block timeout",
@@ -283,17 +208,15 @@ func (p *ChunkProposer) proposeChunk() (*types.Chunk, error) {
"block timestamp", blocks[0].Header.Time,
"block outdated time threshold", currentTimeSec,
)
p.chunkFirstBlockTimeoutReached.Inc()
p.chunkL2TxNum.Set(float64(totalL2TxNum))
p.chunkEstimateL1CommitGas.Set(float64(totalL1CommitGas))
p.totalL1CommitCalldataSize.Set(float64(totalL1CommitCalldataSize))
p.maxTxConsumption.Set(float64(crc.max()))
p.totalTxGasUsed.Set(float64(totalTxGasUsed))
p.chunkBlocksNum.Set(float64(len(chunk.Blocks)))
return &chunk, nil
hasBlockTimeout = true
}
log.Debug("pending blocks do not reach one of the constraints or contain a timeout block")
p.chunkBlocksProposeNotEnoughTotal.Inc()
return nil, nil
if !hasBlockTimeout && totalL1CommitCalldataSize < p.minL1CommitCalldataSizePerChunk {
log.Warn("The calldata size of the chunk is less than the minimum limit",
"totalL1CommitCalldataSize", totalL1CommitCalldataSize,
"minL1CommitCalldataSizePerChunk", p.minL1CommitCalldataSizePerChunk,
)
return nil, nil
}
return chunk, nil
}
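Both proposers follow the same greedy shape: seed the chunk (or batch) with the first element, error out if that first element alone breaks a hard limit, then keep appending until the next element would break one, leaving the offender for the next round. A stripped-down sketch of that shape, where costs and limit stand in for the calldata, gas, and row-consumption checks:

package main

import "fmt"

// accumulate greedily takes elements while the running total stays within limit.
func accumulate(costs []uint64, limit uint64) ([]uint64, error) {
	if len(costs) == 0 {
		return nil, nil
	}
	if costs[0] > limit {
		// mirrors the "first block/chunk exceeds ... limit" error paths above
		return nil, fmt.Errorf("first element exceeds limit: %d > %d", costs[0], limit)
	}
	total, taken := costs[0], []uint64{costs[0]}
	for _, c := range costs[1:] {
		if total+c > limit {
			break // the offending element is left for the next chunk/batch
		}
		total += c
		taken = append(taken, c)
	}
	return taken, nil
}

func main() {
	taken, err := accumulate([]uint64{40, 30, 20, 50}, 100)
	fmt.Println(taken, err) // [40 30 20] <nil>: the 50-cost element would exceed 100
}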


@@ -23,12 +23,14 @@ func testChunkProposer(t *testing.T) {
assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxTxGasPerChunk: 1000000000,
MaxL2TxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MinL1CommitCalldataSizePerChunk: 0,
MaxRowConsumptionPerChunk: 1048319,
ChunkTimeoutSec: 300,
}, db, nil)
}, db)
cp.TryProposeChunk()
expectedChunk := &types.Chunk{
@@ -53,12 +55,14 @@ func testChunkProposerRowConsumption(t *testing.T) {
assert.NoError(t, err)
cp := NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxTxGasPerChunk: 1000000000,
MaxL2TxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MinL1CommitCalldataSizePerChunk: 0,
MaxRowConsumptionPerChunk: 0, // !
ChunkTimeoutSec: 300,
}, db, nil)
}, db)
cp.TryProposeChunk()
chunkOrm := orm.NewChunk(db)


@@ -4,7 +4,6 @@ import (
"context"
"math/big"
"github.com/prometheus/client_golang/prometheus"
geth "github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
@@ -12,9 +11,11 @@ import (
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
"github.com/scroll-tech/go-ethereum/rpc"
"gorm.io/gorm"
"scroll-tech/common/metrics"
"scroll-tech/common/types"
bridgeAbi "scroll-tech/bridge/abi"
@@ -22,6 +23,12 @@ import (
"scroll-tech/bridge/internal/utils"
)
var (
bridgeL1MsgsSyncHeightGauge = gethMetrics.NewRegisteredGauge("bridge/l1/msgs/sync/height", metrics.ScrollRegistry)
bridgeL1MsgsSentEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/sent/events/total", metrics.ScrollRegistry)
bridgeL1MsgsRollupEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l1/msgs/rollup/events/total", metrics.ScrollRegistry)
)
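These package-level go-ethereum metrics are registered once against metrics.ScrollRegistry and then driven from the fetch loop further down: gauges record absolute values via Update, counters accumulate via Inc. A compact illustration of the same calls (recordScan is an illustrative helper, not part of this file):

// recordScan shows how the watcher updates its metrics after scanning a block range.
func recordScan(syncedTo int64, sentEvents, rollupEvents int64) {
	bridgeL1MsgsSyncHeightGauge.Update(syncedTo)           // latest processed L1 block height
	bridgeL1MsgsSentEventsTotalCounter.Inc(sentEvents)     // SentMessage events observed
	bridgeL1MsgsRollupEventsTotalCounter.Inc(rollupEvents) // rollup events observed
}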
type rollupEvent struct {
batchHash common.Hash
txHash common.Hash
@@ -52,12 +59,10 @@ type L1WatcherClient struct {
processedMsgHeight uint64
// The height of the block that the watcher has retrieved header rlp
processedBlockHeight uint64
metrics *l1WatcherMetrics
}
// NewL1WatcherClient returns a new instance of L1WatcherClient.
func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeight uint64, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress, scrollChainAddress common.Address, db *gorm.DB, reg prometheus.Registerer) *L1WatcherClient {
func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeight uint64, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress, scrollChainAddress common.Address, db *gorm.DB) *L1WatcherClient {
l1MessageOrm := orm.NewL1Message(db)
savedHeight, err := l1MessageOrm.GetLayer1LatestWatchedHeight()
if err != nil {
@@ -97,7 +102,6 @@ func NewL1WatcherClient(ctx context.Context, client *ethclient.Client, startHeig
processedMsgHeight: uint64(savedHeight),
processedBlockHeight: savedL1BlockHeight,
metrics: initL1WatcherMetrics(reg),
}
}
@@ -121,7 +125,6 @@ func (w *L1WatcherClient) SetConfirmations(confirmations rpc.BlockNumber) {
// FetchBlockHeader pulls the latest L1 blocks and saves them in the DB
func (w *L1WatcherClient) FetchBlockHeader(blockHeight uint64) error {
w.metrics.l1WatcherFetchBlockHeaderTotal.Inc()
fromBlock := int64(w.processedBlockHeight) + 1
toBlock := int64(blockHeight)
if toBlock < fromBlock {
@@ -168,7 +171,6 @@ func (w *L1WatcherClient) FetchBlockHeader(blockHeight uint64) error {
// update processed height
w.processedBlockHeight = uint64(toBlock)
w.metrics.l1WatcherFetchBlockHeaderProcessedBlockHeight.Set(float64(w.processedBlockHeight))
return nil
}
@@ -187,7 +189,6 @@ func (w *L1WatcherClient) FetchContractEvent() error {
toBlock := int64(blockHeight)
for from := fromBlock; from <= toBlock; from += contractEventsBlocksFetchLimit {
w.metrics.l1WatcherFetchContractEventTotal.Inc()
to := from + contractEventsBlocksFetchLimit - 1
if to > toBlock {
@@ -219,10 +220,9 @@ func (w *L1WatcherClient) FetchContractEvent() error {
}
if len(logs) == 0 {
w.processedMsgHeight = uint64(to)
w.metrics.l1WatcherFetchContractEventProcessedBlockHeight.Set(float64(to))
bridgeL1MsgsSyncHeightGauge.Update(to)
continue
}
log.Info("Received new L1 events", "fromBlock", from, "toBlock", to, "cnt", len(logs))
sentMessageEvents, rollupEvents, err := w.parseBridgeEventLogs(logs)
@@ -232,8 +232,8 @@ func (w *L1WatcherClient) FetchContractEvent() error {
}
sentMessageCount := int64(len(sentMessageEvents))
rollupEventCount := int64(len(rollupEvents))
w.metrics.l1WatcherFetchContractEventSentEventsTotal.Add(float64(sentMessageCount))
w.metrics.l1WatcherFetchContractEventRollupEventsTotal.Add(float64(rollupEventCount))
bridgeL1MsgsSentEventsTotalCounter.Inc(sentMessageCount)
bridgeL1MsgsRollupEventsTotalCounter.Inc(rollupEventCount)
log.Info("L1 events types", "SentMessageCount", sentMessageCount, "RollupEventCount", rollupEventCount)
// use rollup events to update the rollup result statuses in the db
@@ -273,8 +273,7 @@ func (w *L1WatcherClient) FetchContractEvent() error {
}
w.processedMsgHeight = uint64(to)
w.metrics.l1WatcherFetchContractEventSuccessTotal.Inc()
w.metrics.l1WatcherFetchContractEventProcessedBlockHeight.Set(float64(w.processedMsgHeight))
bridgeL1MsgsSyncHeightGauge.Update(to)
}
return nil


@@ -1,59 +0,0 @@
package watcher
import (
"sync"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
type l1WatcherMetrics struct {
l1WatcherFetchBlockHeaderTotal prometheus.Counter
l1WatcherFetchBlockHeaderProcessedBlockHeight prometheus.Gauge
l1WatcherFetchContractEventTotal prometheus.Counter
l1WatcherFetchContractEventSuccessTotal prometheus.Counter
l1WatcherFetchContractEventProcessedBlockHeight prometheus.Gauge
l1WatcherFetchContractEventSentEventsTotal prometheus.Counter
l1WatcherFetchContractEventRollupEventsTotal prometheus.Counter
}
var (
initL1WatcherMetricOnce sync.Once
l1WatcherMetric *l1WatcherMetrics
)
func initL1WatcherMetrics(reg prometheus.Registerer) *l1WatcherMetrics {
initL1WatcherMetricOnce.Do(func() {
l1WatcherMetric = &l1WatcherMetrics{
l1WatcherFetchBlockHeaderTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_l1_watcher_fetch_block_header_total",
Help: "The total number of l1 watcher fetch block header total",
}),
l1WatcherFetchBlockHeaderProcessedBlockHeight: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_l1_watcher_fetch_block_header_processed_block_height",
Help: "The current processed block height of l1 watcher fetch block header",
}),
l1WatcherFetchContractEventTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_l1_watcher_fetch_block_contract_event_total",
Help: "The total number of l1 watcher fetch contract event total",
}),
l1WatcherFetchContractEventSuccessTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_l1_watcher_fetch_block_contract_event_success_total",
Help: "The total number of l1 watcher fetch contract event success total",
}),
l1WatcherFetchContractEventProcessedBlockHeight: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_l1_watcher_fetch_block_contract_event_processed_block_height",
Help: "The current processed block height of l1 watcher fetch contract event",
}),
l1WatcherFetchContractEventSentEventsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_l1_watcher_fetch_block_contract_event_sent_event_total",
Help: "The current processed block height of l1 watcher fetch contract sent event",
}),
l1WatcherFetchContractEventRollupEventsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_l1_watcher_fetch_block_contract_event_rollup_event_total",
Help: "The current processed block height of l1 watcher fetch contract rollup event",
}),
}
})
return l1WatcherMetric
}


@@ -30,8 +30,7 @@ func setupL1Watcher(t *testing.T) (*L1WatcherClient, *gorm.DB) {
client, err := ethclient.Dial(base.L1gethImg.Endpoint())
assert.NoError(t, err)
l1Cfg := cfg.L1Config
watcher := NewL1WatcherClient(context.Background(), client, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress,
l1Cfg.L1MessageQueueAddress, l1Cfg.RelayerConfig.RollupContractAddress, db, nil)
watcher := NewL1WatcherClient(context.Background(), client, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.RelayerConfig.RollupContractAddress, db)
assert.NoError(t, watcher.FetchContractEvent())
return watcher, db
}


@@ -5,7 +5,6 @@ import (
"fmt"
"math/big"
"github.com/prometheus/client_golang/prometheus"
geth "github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
@@ -14,9 +13,11 @@ import (
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/event"
"github.com/scroll-tech/go-ethereum/log"
gethMetrics "github.com/scroll-tech/go-ethereum/metrics"
"github.com/scroll-tech/go-ethereum/rpc"
"gorm.io/gorm"
"scroll-tech/common/metrics"
"scroll-tech/common/types"
bridgeAbi "scroll-tech/bridge/abi"
@@ -24,6 +25,14 @@ import (
"scroll-tech/bridge/internal/utils"
)
// Metrics
var (
bridgeL2MsgsSyncHeightGauge = gethMetrics.NewRegisteredGauge("bridge/l2/msgs/sync/height", metrics.ScrollRegistry)
bridgeL2BlocksFetchedHeightGauge = gethMetrics.NewRegisteredGauge("bridge/l2/blocks/fetched/height", metrics.ScrollRegistry)
bridgeL2BlocksFetchedGapGauge = gethMetrics.NewRegisteredGauge("bridge/l2/blocks/fetched/gap", metrics.ScrollRegistry)
bridgeL2MsgsRelayedEventsTotalCounter = gethMetrics.NewRegisteredCounter("bridge/l2/msgs/relayed/events/total", metrics.ScrollRegistry)
)
// L2WatcherClient provide APIs which support others to subscribe to various event from l2geth
type L2WatcherClient struct {
ctx context.Context
@@ -47,12 +56,10 @@ type L2WatcherClient struct {
processedMsgHeight uint64
stopped uint64
metrics *l2WatcherMetrics
}
// NewL2WatcherClient takes an l2geth instance and generates an L2WatcherClient instance
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress common.Address, withdrawTrieRootSlot common.Hash, db *gorm.DB, reg prometheus.Registerer) *L2WatcherClient {
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations rpc.BlockNumber, messengerAddress, messageQueueAddress common.Address, withdrawTrieRootSlot common.Hash, db *gorm.DB) *L2WatcherClient {
l1MessageOrm := orm.NewL1Message(db)
var savedHeight uint64
l1msg, err := l1MessageOrm.GetLayer1LatestMessageWithLayer2Hash()
@@ -86,7 +93,6 @@ func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmat
withdrawTrieRootSlot: withdrawTrieRootSlot,
stopped: 0,
metrics: initL2WatcherMetrics(reg),
}
return &w
@@ -96,7 +102,6 @@ const blockTracesFetchLimit = uint64(10)
// TryFetchRunningMissingBlocks attempts to fetch and store block traces for any missing blocks.
func (w *L2WatcherClient) TryFetchRunningMissingBlocks(blockHeight uint64) {
w.metrics.fetchRunningMissingBlocksTotal.Inc()
heightInDB, err := w.l2BlockOrm.GetL2BlocksLatestHeight(w.ctx)
if err != nil {
log.Error("failed to GetL2BlocksLatestHeight", "err", err)
@@ -115,8 +120,8 @@ func (w *L2WatcherClient) TryFetchRunningMissingBlocks(blockHeight uint64) {
log.Error("fail to getAndStoreBlockTraces", "from", from, "to", to, "err", err)
return
}
w.metrics.fetchRunningMissingBlocksHeight.Set(float64(to))
w.metrics.bridgeL2BlocksFetchedGap.Set(float64(blockHeight - to))
bridgeL2BlocksFetchedHeightGauge.Update(int64(to))
bridgeL2BlocksFetchedGapGauge.Update(int64(blockHeight - to))
}
}
@@ -194,7 +199,6 @@ func (w *L2WatcherClient) FetchContractEvent() {
log.Info("l2 watcher fetchContractEvent", "w.processedMsgHeight", w.processedMsgHeight)
}()
w.metrics.fetchContractEventTotal.Inc()
blockHeight, err := utils.GetLatestConfirmedBlockNumber(w.ctx, w.Client, w.confirmations)
if err != nil {
log.Error("failed to get block number", "err", err)
@@ -234,7 +238,7 @@ func (w *L2WatcherClient) FetchContractEvent() {
}
if len(logs) == 0 {
w.processedMsgHeight = uint64(to)
w.metrics.fetchContractEventHeight.Set(float64(to))
bridgeL2MsgsSyncHeightGauge.Update(to)
continue
}
log.Info("received new L2 messages", "fromBlock", from, "toBlock", to, "cnt", len(logs))
@@ -246,7 +250,7 @@ func (w *L2WatcherClient) FetchContractEvent() {
}
relayedMessageCount := int64(len(relayedMessageEvents))
w.metrics.bridgeL2MsgsRelayedEventsTotal.Add(float64(relayedMessageCount))
bridgeL2MsgsRelayedEventsTotalCounter.Inc(relayedMessageCount)
log.Info("L2 events types", "RelayedMessageCount", relayedMessageCount)
// Update relayed messages first to make sure we don't forget to update submitted messages.
@@ -265,7 +269,7 @@ func (w *L2WatcherClient) FetchContractEvent() {
}
w.processedMsgHeight = uint64(to)
w.metrics.fetchContractEventHeight.Set(float64(to))
bridgeL2MsgsSyncHeightGauge.Update(to)
}
}


@@ -1,54 +0,0 @@
package watcher
import (
"sync"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
type l2WatcherMetrics struct {
fetchRunningMissingBlocksTotal prometheus.Counter
fetchRunningMissingBlocksHeight prometheus.Gauge
fetchContractEventTotal prometheus.Counter
fetchContractEventHeight prometheus.Gauge
bridgeL2MsgsRelayedEventsTotal prometheus.Counter
bridgeL2BlocksFetchedGap prometheus.Gauge
}
var (
initL2WatcherMetricOnce sync.Once
l2WatcherMetric *l2WatcherMetrics
)
func initL2WatcherMetrics(reg prometheus.Registerer) *l2WatcherMetrics {
initL2WatcherMetricOnce.Do(func() {
l2WatcherMetric = &l2WatcherMetrics{
fetchRunningMissingBlocksTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_l2_watcher_fetch_running_missing_blocks_total",
Help: "The total number of l2 watcher fetch running missing blocks",
}),
fetchRunningMissingBlocksHeight: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_l2_watcher_fetch_running_missing_blocks_height",
Help: "The total number of l2 watcher fetch running missing blocks height",
}),
fetchContractEventTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_l2_watcher_fetch_contract_events_total",
Help: "The total number of l2 watcher fetch contract events",
}),
fetchContractEventHeight: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_l2_watcher_fetch_contract_height",
Help: "The total number of l2 watcher fetch contract height",
}),
bridgeL2MsgsRelayedEventsTotal: promauto.With(reg).NewCounter(prometheus.CounterOpts{
Name: "bridge_l2_watcher_msg_relayed_events_total",
Help: "The total number of l2 watcher msg relayed event",
}),
bridgeL2BlocksFetchedGap: promauto.With(reg).NewGauge(prometheus.GaugeOpts{
Name: "bridge_l2_watcher_blocks_fetched_gap",
Help: "The gap of l2 fetch",
}),
}
})
return l2WatcherMetric
}


@@ -34,8 +34,7 @@ import (
func setupL2Watcher(t *testing.T) (*L2WatcherClient, *gorm.DB) {
db := setupDB(t)
l2cfg := cfg.L2Config
watcher := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress,
l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, db, nil)
watcher := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.L2MessengerAddress, l2cfg.L2MessageQueueAddress, l2cfg.WithdrawTrieRootSlot, db)
return watcher, db
}
@@ -51,7 +50,7 @@ func testCreateNewWatcherAndStop(t *testing.T) {
l1cfg := cfg.L1Config
l1cfg.RelayerConfig.SenderConfig.Confirmations = rpc.LatestBlockNumber
newSender, err := sender.NewSender(context.Background(), l1cfg.RelayerConfig.SenderConfig, l1cfg.RelayerConfig.MessageSenderPrivateKey, "test", "test", nil)
newSender, err := sender.NewSender(context.Background(), l1cfg.RelayerConfig.SenderConfig, l1cfg.RelayerConfig.MessageSenderPrivateKeys)
assert.NoError(t, err)
// Create several transactions and commit to block
@@ -72,7 +71,7 @@ func testFetchRunningMissingBlocks(t *testing.T) {
_, db := setupL2Watcher(t)
defer database.CloseDB(db)
auth := prepareAuth(t, l2Cli, cfg.L2Config.RelayerConfig.MessageSenderPrivateKey)
auth := prepareAuth(t, l2Cli, cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys[0])
// deploy mock bridge
_, tx, _, err := mock_bridge.DeployMockBridgeL2(auth, l2Cli)
@@ -96,7 +95,7 @@ func testFetchRunningMissingBlocks(t *testing.T) {
func prepareWatcherClient(l2Cli *ethclient.Client, db *gorm.DB, contractAddr common.Address) *L2WatcherClient {
confirmations := rpc.LatestBlockNumber
return NewL2WatcherClient(context.Background(), l2Cli, confirmations, contractAddr, contractAddr, common.Hash{}, db, nil)
return NewL2WatcherClient(context.Background(), l2Cli, confirmations, contractAddr, contractAddr, common.Hash{}, db)
}
func prepareAuth(t *testing.T, l2Cli *ethclient.Client, privateKey *ecdsa.PrivateKey) *bind.TransactOpts {


@@ -311,7 +311,7 @@ func (o *Batch) UpdateProvingStatus(ctx context.Context, hash string, status typ
updateFields["prover_assigned_at"] = time.Now()
case types.ProvingTaskUnassigned:
updateFields["prover_assigned_at"] = nil
case types.ProvingTaskVerified:
case types.ProvingTaskProved, types.ProvingTaskVerified:
updateFields["proved_at"] = time.Now()
}


@@ -201,7 +201,7 @@ func (o *Chunk) UpdateProvingStatus(ctx context.Context, hash string, status typ
updateFields["prover_assigned_at"] = time.Now()
case types.ProvingTaskUnassigned:
updateFields["prover_assigned_at"] = nil
case types.ProvingTaskVerified:
case types.ProvingTaskProved, types.ProvingTaskVerified:
updateFields["proved_at"] = time.Now()
}


@@ -56,13 +56,13 @@ contract MockBridgeL1 {
/// @notice Emitted when a new batch is committed.
/// @param batchHash The hash of the batch.
event CommitBatch(uint256 indexed batchIndex, bytes32 indexed batchHash);
event CommitBatch(bytes32 indexed batchHash);
/// @notice Emitted when a batch is finalized.
/// @param batchHash The hash of the batch
/// @param stateRoot The state root on layer 2 after this batch.
/// @param withdrawRoot The merkle root on layer2 after this batch.
event FinalizeBatch(uint256 indexed batchIndex, bytes32 indexed batchHash, bytes32 stateRoot, bytes32 withdrawRoot);
event FinalizeBatch(bytes32 indexed batchHash, bytes32 stateRoot, bytes32 withdrawRoot);
/***********
* Structs *
@@ -130,7 +130,7 @@ contract MockBridgeL1 {
function commitBatch(
uint8 /*version*/,
bytes calldata _parentBatchHeader,
bytes calldata /*parentBatchHeader*/,
bytes[] memory chunks,
bytes calldata /*skippedL1MessageBitmap*/
) external {
@@ -138,17 +138,6 @@ contract MockBridgeL1 {
uint256 _chunksLength = chunks.length;
require(_chunksLength > 0, "batch is empty");
// decode batch index
uint256 headerLength = _parentBatchHeader.length;
uint256 parentBatchPtr;
uint256 parentBatchIndex;
assembly {
parentBatchPtr := mload(0x40)
calldatacopy(parentBatchPtr, _parentBatchHeader.offset, headerLength)
mstore(0x40, add(parentBatchPtr, headerLength))
parentBatchIndex := shr(192, mload(add(parentBatchPtr, 1)))
}
uint256 dataPtr;
assembly {
dataPtr := mload(0x40)
@@ -180,29 +169,18 @@ contract MockBridgeL1 {
}
bytes32 _batchHash = BatchHeaderV0Codec.computeBatchHash(batchPtr, 89);
committedBatches[0] = _batchHash;
emit CommitBatch(parentBatchIndex + 1, _batchHash);
emit CommitBatch(_batchHash);
}
function finalizeBatchWithProof(
bytes calldata batchHeader,
bytes calldata /*batchHeader*/,
bytes32 /*prevStateRoot*/,
bytes32 postStateRoot,
bytes32 withdrawRoot,
bytes calldata /*aggrProof*/
) external {
// decode batch index
uint256 headerLength = batchHeader.length;
uint256 batchPtr;
uint256 batchIndex;
assembly {
batchPtr := mload(0x40)
calldatacopy(batchPtr, batchHeader.offset, headerLength)
mstore(0x40, add(batchPtr, headerLength))
batchIndex := shr(192, mload(add(batchPtr, 1)))
}
bytes32 _batchHash = committedBatches[0];
emit FinalizeBatch(batchIndex, _batchHash, postStateRoot, withdrawRoot);
emit FinalizeBatch(_batchHash, postStateRoot, withdrawRoot);
}
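The assembly removed above existed only to recover the batch index for the event arguments: mload at offset 1 reads 32 bytes of the header and shr(192, ...) keeps the top 8, i.e. the big-endian uint64 stored at bytes [1, 9). A sketch of the equivalent decoding, expressed in Go for brevity and assuming the BatchHeaderV0 layout implied by that assembly (a version byte followed by an 8-byte big-endian batch index):

package main

import (
	"encoding/binary"
	"fmt"
)

// batchIndex reads the 8-byte big-endian batch index that follows the version byte.
func batchIndex(header []byte) uint64 {
	return binary.BigEndian.Uint64(header[1:9]) // byte 0 holds the header version
}

func main() {
	header := make([]byte, 89) // 89-byte BatchHeaderV0, as hashed above
	binary.BigEndian.PutUint64(header[1:9], 42)
	fmt.Println(batchIndex(header)) // 42
}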
/**********************


@@ -83,10 +83,10 @@ func setupEnv(t *testing.T) {
l2Cfg.Confirmations = 0
l2Cfg.RelayerConfig.SenderConfig.Confirmations = 0
l1Auth, err = bind.NewKeyedTransactorWithChainID(bridgeApp.Config.L2Config.RelayerConfig.MessageSenderPrivateKey, base.L1gethImg.ChainID())
l1Auth, err = bind.NewKeyedTransactorWithChainID(bridgeApp.Config.L2Config.RelayerConfig.MessageSenderPrivateKeys[0], base.L1gethImg.ChainID())
assert.NoError(t, err)
l2Auth, err = bind.NewKeyedTransactorWithChainID(bridgeApp.Config.L1Config.RelayerConfig.MessageSenderPrivateKey, base.L2gethImg.ChainID())
l2Auth, err = bind.NewKeyedTransactorWithChainID(bridgeApp.Config.L1Config.RelayerConfig.MessageSenderPrivateKeys[0], base.L2gethImg.ChainID())
assert.NoError(t, err)
}


@@ -26,14 +26,13 @@ func testImportL1GasPrice(t *testing.T) {
l1Cfg := bridgeApp.Config.L1Config
// Create L1Relayer
l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig, nil)
l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
assert.NoError(t, err)
// Create L1Watcher
startHeight, err := l1Client.BlockNumber(context.Background())
assert.NoError(t, err)
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, startHeight-1, 0,
l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db, nil)
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, startHeight-1, 0, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
// fetch new blocks
number, err := l1Client.BlockNumber(context.Background())
@@ -68,7 +67,7 @@ func testImportL2GasPrice(t *testing.T) {
prepareContracts(t)
l2Cfg := bridgeApp.Config.L2Config
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, false, nil)
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, false)
assert.NoError(t, err)
// add fake chunk


@@ -29,16 +29,14 @@ func testRelayL1MessageSucceed(t *testing.T) {
l2Cfg := bridgeApp.Config.L2Config
// Create L1Relayer
l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig, nil)
l1Relayer, err := relayer.NewLayer1Relayer(context.Background(), db, l1Cfg.RelayerConfig)
assert.NoError(t, err)
// Create L1Watcher
confirmations := rpc.LatestBlockNumber
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, confirmations, l1Cfg.L1MessengerAddress,
l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db, nil)
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
// Create L2Watcher
l2Watcher := watcher.NewL2WatcherClient(context.Background(), l2Client, confirmations, l2Cfg.L2MessengerAddress,
l2Cfg.L2MessageQueueAddress, l2Cfg.WithdrawTrieRootSlot, db, nil)
l2Watcher := watcher.NewL2WatcherClient(context.Background(), l2Client, confirmations, l2Cfg.L2MessengerAddress, l2Cfg.L2MessageQueueAddress, l2Cfg.WithdrawTrieRootSlot, db)
// send message through l1 messenger contract
nonce, err := l1MessengerInstance.MessageNonce(&bind.CallOpts{})

View File

@@ -5,6 +5,7 @@ import (
"math/big"
"testing"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
gethTypes "github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert"
@@ -12,7 +13,6 @@ import (
"scroll-tech/common/database"
"scroll-tech/common/types"
"scroll-tech/common/types/message"
"scroll-tech/common/utils"
"scroll-tech/bridge/internal/config"
"scroll-tech/bridge/internal/controller/relayer"
@@ -28,13 +28,12 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
// Create L2Relayer
l2Cfg := bridgeApp.Config.L2Config
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, false, nil)
l2Relayer, err := relayer.NewLayer2Relayer(context.Background(), l2Client, db, l2Cfg.RelayerConfig, false)
assert.NoError(t, err)
// Create L1Watcher
l1Cfg := bridgeApp.Config.L1Config
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress,
l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db, nil)
l1Watcher := watcher.NewL1WatcherClient(context.Background(), l1Client, 0, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.L1MessageQueueAddress, l1Cfg.ScrollChainContractAddress, db)
// add some blocks to db
var wrappedBlocks []*types.WrappedBlock
@@ -58,12 +57,14 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
assert.NoError(t, err)
cp := watcher.NewChunkProposer(context.Background(), &config.ChunkProposerConfig{
MaxTxGasPerChunk: 1000000000,
MaxL2TxNumPerChunk: 10000,
MaxL1CommitGasPerChunk: 50000000000,
MaxL1CommitCalldataSizePerChunk: 1000000,
MinL1CommitCalldataSizePerChunk: 0,
MaxRowConsumptionPerChunk: 1048319,
ChunkTimeoutSec: 300,
}, db, nil)
}, db)
cp.TryProposeChunk()
chunkOrm := orm.NewChunk(db)
@@ -75,8 +76,9 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
MaxChunkNumPerBatch: 10,
MaxL1CommitGasPerBatch: 50000000000,
MaxL1CommitCalldataSizePerBatch: 1000000,
MinChunkNumPerBatch: 1,
BatchTimeoutSec: 300,
}, db, nil)
}, db)
bp.TryProposeBatch()
l2Relayer.ProcessPendingBatches()
@@ -89,22 +91,20 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
assert.NotEmpty(t, batch.CommitTxHash)
assert.Equal(t, types.RollupCommitting, types.RollupStatus(batch.RollupStatus))
success := utils.TryTimes(30, func() bool {
var receipt *gethTypes.Receipt
receipt, err = l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.CommitTxHash))
return err == nil && receipt.Status == 1
})
assert.True(t, success)
assert.NoError(t, err)
commitTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(batch.CommitTxHash))
assert.NoError(t, err)
commitTxReceipt, err := bind.WaitMined(context.Background(), l1Client, commitTx)
assert.NoError(t, err)
assert.Equal(t, len(commitTxReceipt.Logs), 1)
// fetch rollup events
success = utils.TryTimes(30, func() bool {
err = l1Watcher.FetchContractEvent()
assert.NoError(t, err)
var statuses []types.RollupStatus
statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash})
return err == nil && len(statuses) == 1 && types.RollupCommitted == statuses[0]
})
assert.True(t, success)
err = l1Watcher.FetchContractEvent()
assert.NoError(t, err)
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupCommitted, statuses[0])
// add dummy proof
proof := &message.BatchProof{
@@ -118,7 +118,7 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
// process committed batch and check status
l2Relayer.ProcessCommittedBatches()
statuses, err := batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash})
statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupFinalizing, statuses[0])
@@ -128,20 +128,17 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
assert.NotNil(t, batch)
assert.NotEmpty(t, batch.FinalizeTxHash)
success = utils.TryTimes(30, func() bool {
var receipt *gethTypes.Receipt
receipt, err = l1Client.TransactionReceipt(context.Background(), common.HexToHash(batch.FinalizeTxHash))
return err == nil && receipt.Status == 1
})
assert.True(t, success)
finalizeTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(batch.FinalizeTxHash))
assert.NoError(t, err)
finalizeTxReceipt, err := bind.WaitMined(context.Background(), l1Client, finalizeTx)
assert.NoError(t, err)
assert.Equal(t, len(finalizeTxReceipt.Logs), 1)
// fetch rollup events
success = utils.TryTimes(30, func() bool {
err = l1Watcher.FetchContractEvent()
assert.NoError(t, err)
var statuses []types.RollupStatus
statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash})
return err == nil && len(statuses) == 1 && types.RollupFinalized == statuses[0]
})
assert.True(t, success)
err = l1Watcher.FetchContractEvent()
assert.NoError(t, err)
statuses, err = batchOrm.GetRollupStatusByHashList(context.Background(), []string{batchHash})
assert.NoError(t, err)
assert.Equal(t, 1, len(statuses))
assert.Equal(t, types.RollupFinalized, statuses[0])
}
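The hunks above replace the utils.TryTimes receipt-polling loops with bind.WaitMined followed by an explicit log-count assertion. A hedged standalone sketch of that pattern follows; the RPC endpoint and transaction hash are placeholders, not values from this test.

package main

import (
	"context"
	"fmt"
	"log"

	"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/ethclient"
)

func main() {
	// Placeholder endpoint; the integration test uses its own l1Client.
	client, err := ethclient.Dial("http://localhost:8545")
	if err != nil {
		log.Fatal(err)
	}
	// Placeholder hash standing in for batch.CommitTxHash / batch.FinalizeTxHash.
	txHash := common.HexToHash("0x0000000000000000000000000000000000000000000000000000000000000000")
	tx, _, err := client.TransactionByHash(context.Background(), txHash)
	if err != nil {
		log.Fatal(err)
	}
	// Block until the transaction is mined instead of polling for the receipt.
	receipt, err := bind.WaitMined(context.Background(), client, tx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("status:", receipt.Status, "logs:", len(receipt.Logs))
}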

View File

@@ -37,7 +37,7 @@ COPY . .
RUN cp -r ./common/libzkp/interface ./coordinator/internal/logic/verifier/lib
COPY --from=zkp-builder /app/target/release/libzkp.so ./coordinator/internal/logic/verifier/lib/
COPY --from=zkp-builder /app/target/release/libzktrie.so ./coordinator/internal/logic/verifier/lib/
RUN cd ./coordinator && make coordinator_skip_libzkp && mv ./build/bin/coordinator /bin/coordinator && mv internal/logic/verifier/lib /bin/
RUN cd ./coordinator && go build -v -p 4 -o /bin/coordinator ./cmd && mv internal/logic/verifier/lib /bin/
# Pull coordinator into a second stage deploy alpine container
FROM ubuntu:20.04
@@ -46,6 +46,6 @@ ENV LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/src/coordinator/internal/logic/verifier/li
RUN mkdir -p /src/coordinator/internal/logic/verifier/lib
COPY --from=builder /bin/lib /src/coordinator/internal/logic/verifier/lib
COPY --from=builder /bin/coordinator /bin/
RUN /bin/coordinator --version
ENTRYPOINT ["/bin/coordinator"]

View File

@@ -1,31 +0,0 @@
# Download Go dependencies
FROM scrolltech/go-alpine-builder:1.19 as base
WORKDIR /src
COPY go.work* ./
COPY ./bridge/go.* ./bridge/
COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./prover-stats-api/go.* ./prover-stats-api/
COPY ./prover/go.* ./prover/
COPY ./tests/integration-test/go.* ./tests/integration-test/
COPY ./bridge-history-api/go.* ./bridge-history-api/
# Support mainland environment.
#ENV GOPROXY="https://goproxy.cn,direct"
RUN go mod download -x
# Build prover-stats-api
FROM base as builder
RUN --mount=target=. \
--mount=type=cache,target=/root/.cache/go-build \
cd /src/prover-stats-api/cmd/ && go build -v -p 4 -o /bin/prover-stats-api
# Pull prover-stats-api into a second stage deploy alpine container \
FROM alpine:latest
COPY --from=builder /bin/prover-stats-api /bin/
ENTRYPOINT ["prover-stats-api"]

View File

@@ -83,9 +83,7 @@ func (c *Cmd) Write(data []byte) (int, error) {
out := string(data)
if verbose || c.openLog {
fmt.Printf("%s:\n\t%v", c.name, out)
} else if strings.Contains(strings.ToLower(out), "error") ||
strings.Contains(strings.ToLower(out), "warning") ||
strings.Contains(strings.ToLower(out), "info") {
} else if strings.Contains(strings.ToLower(out), "error") || strings.Contains(strings.ToLower(out), "warning") || strings.Contains(strings.ToLower(out), "info") {
fmt.Printf("%s:\n\t%v", c.name, out)
}
go c.checkFuncs.IterCb(func(_ string, value interface{}) {
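For clarity, a minimal standalone sketch of the filtering rule Cmd.Write applies above (just the predicate, not the full Cmd type):

package main

import (
	"fmt"
	"strings"
)

// shouldPrint reports whether a captured output line gets echoed: always when
// verbose logging (or the command's openLog flag) is on, otherwise only when
// the line looks like an error/warning/info message.
func shouldPrint(verbose bool, out string) bool {
	lower := strings.ToLower(out)
	return verbose ||
		strings.Contains(lower, "error") ||
		strings.Contains(lower, "warning") ||
		strings.Contains(lower, "info")
}

func main() {
	fmt.Println(shouldPrint(false, "WARNING: low disk space")) // true
	fmt.Println(shouldPrint(false, "plain build output"))      // false
}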

File diff suppressed because one or more lines are too long

View File

@@ -1,4 +1,4 @@
FROM scrolltech/l2geth:scroll-v4.3.34
FROM scrolltech/l2geth:scroll-v4.3.18
RUN mkdir -p /l2geth/keystore

View File

@@ -3,18 +3,14 @@ module scroll-tech/common
go 1.19
require (
github.com/bits-and-blooms/bitset v1.7.0
github.com/docker/docker v23.0.6+incompatible
github.com/gin-contrib/pprof v1.4.0
github.com/gin-gonic/gin v1.9.1
github.com/jmoiron/sqlx v1.3.5
github.com/lib/pq v1.10.9
github.com/mattn/go-colorable v0.1.13
github.com/mattn/go-isatty v0.0.19
github.com/modern-go/reflect2 v1.0.2
github.com/orcaman/concurrent-map v1.0.0
github.com/prometheus/client_golang v1.14.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20230812030736-25fe3ba69a28
github.com/scroll-tech/go-ethereum v1.10.14-0.20230804022247-26eeb40ea3ca
github.com/stretchr/testify v1.8.3
github.com/urfave/cli/v2 v2.25.7
gorm.io/driver/postgres v1.5.0
@@ -25,11 +21,8 @@ require (
github.com/Azure/go-ansiterm v0.0.0-20230124172434-306776ec8161 // indirect
github.com/Microsoft/go-winio v0.6.1 // indirect
github.com/VictoriaMetrics/fastcache v1.6.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/btcsuite/btcd v0.20.1-beta // indirect
github.com/bytedance/sonic v1.9.2 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set v1.8.0 // indirect
@@ -40,20 +33,15 @@ require (
github.com/edsrzf/mmap-go v1.0.0 // indirect
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect
github.com/fsnotify/fsnotify v1.6.0 // indirect
github.com/gabriel-vasile/mimetype v1.4.2 // indirect
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
github.com/gin-contrib/sse v0.1.0 // indirect
github.com/go-kit/kit v0.9.0 // indirect
github.com/go-logfmt/logfmt v0.5.1 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.14.1 // indirect
github.com/go-sql-driver/mysql v1.7.1 // indirect
github.com/go-stack/stack v1.8.1 // indirect
github.com/goccy/go-json v0.10.2 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.2 // indirect
github.com/golang/snappy v0.0.5-0.20220116011046-fa5810519dcb // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/graph-gophers/graphql-go v1.3.0 // indirect
@@ -72,17 +60,13 @@ require (
github.com/jackpal/go-nat-pmp v1.0.2 // indirect
github.com/jinzhu/inflection v1.0.0 // indirect
github.com/jinzhu/now v1.1.5 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/cpuid/v2 v2.2.5 // indirect
github.com/julienschmidt/httprouter v1.3.0 // indirect
github.com/kr/pretty v0.3.1 // indirect
github.com/leodido/go-urn v1.2.4 // indirect
github.com/mattn/go-runewidth v0.0.14 // indirect
github.com/mattn/go-sqlite3 v1.14.16 // indirect
github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mitchellh/pointerstructure v1.2.0 // indirect
github.com/moby/term v0.5.0 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/onsi/ginkgo v1.16.5 // indirect
@@ -90,13 +74,9 @@ require (
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.1.0-rc3 // indirect
github.com/opentracing/opentracing-go v1.1.0 // indirect
github.com/pelletier/go-toml/v2 v2.0.8 // indirect
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 // indirect
github.com/pkg/errors v0.9.1 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_model v0.3.0 // indirect
github.com/prometheus/common v0.39.0 // indirect
github.com/prometheus/procfs v0.9.0 // indirect
github.com/prometheus/tsdb v0.7.1 // indirect
github.com/rivo/uniseg v0.4.4 // indirect
github.com/rjeczalik/notify v0.9.1 // indirect
@@ -109,21 +89,17 @@ require (
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 // indirect
github.com/tklauser/go-sysconf v0.3.11 // indirect
github.com/tklauser/numcpus v0.6.0 // indirect
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
github.com/tyler-smith/go-bip39 v1.1.0 // indirect
github.com/ugorji/go/codec v1.2.11 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/arch v0.4.0 // indirect
golang.org/x/crypto v0.12.0 // indirect
golang.org/x/crypto v0.11.0 // indirect
golang.org/x/mod v0.12.0 // indirect
golang.org/x/net v0.12.0 // indirect
golang.org/x/sync v0.3.0 // indirect
golang.org/x/sys v0.11.0 // indirect
golang.org/x/text v0.12.0 // indirect
golang.org/x/sys v0.10.0 // indirect
golang.org/x/text v0.11.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.11.0 // indirect
google.golang.org/protobuf v1.31.0 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/urfave/cli.v1 v1.20.0 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect

View File

@@ -38,10 +38,6 @@ github.com/andreyvit/diff v0.0.0-20170406064948-c7f18ee00883/go.mod h1:rCTlJbsFo
github.com/apache/arrow/go/arrow v0.0.0-20191024131854-af6fa24be0db/go.mod h1:VTxUBvSJ3s3eHAg65PNgrsn5BtqCRPdmyXh6rAfdxN0=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bits-and-blooms/bitset v1.7.0 h1:YjAGVd3XmtK9ktAbX8Zg2g2PwLIMjGREZJHlV4j7NEo=
github.com/bits-and-blooms/bitset v1.7.0/go.mod h1:gIdJ4wp64HaoK2YrL1Q5/N7Y16edYb8uY+O0FJTyyDA=
github.com/bmizerany/pat v0.0.0-20170815010413-6226ea591a40/go.mod h1:8rLXio+WjiTceGBHIoTvn60HIbs7Hm7bcHjyrSqYB9c=
github.com/boltdb/bolt v1.3.1/go.mod h1:clJnj/oiGkjum5o1McbSZDSLxVThjynRyGBgiAx27Ps=
github.com/btcsuite/btcd v0.20.1-beta h1:Ik4hyJqN8Jfyv3S4AGBOmyouMsYE3EdYODkMbQjwPGw=
@@ -53,9 +49,6 @@ github.com/btcsuite/goleveldb v0.0.0-20160330041536-7834afc9e8cd/go.mod h1:F+uVa
github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku9dyThutzjeg+3xrA5iCpBRH8XEEg3lh6TiUghc=
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
github.com/bytedance/sonic v1.5.0/go.mod h1:ED5hyg4y6t3/9Ku1R6dU/4KyJ48DZ4jPhfY1O2AihPM=
github.com/bytedance/sonic v1.9.2 h1:GDaNjuWSGu09guE9Oql0MSTNhNCLlWwO8y/xM5BzcbM=
github.com/bytedance/sonic v1.9.2/go.mod h1:i736AoUSYt75HyZLoJW9ERYxcy6eaN6h4BZXU064P/U=
github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
@@ -63,9 +56,6 @@ github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghf
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chenzhuoyu/base64x v0.0.0-20211019084208-fb5309c8db06/go.mod h1:DH46F32mSOjUmXrMHnKwZdA8wcEefY7UVqBKYGjpdQY=
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311 h1:qSGYFH7+jGhDF8vLC+iwCD4WpbV1EBDSzWkJODFLams=
github.com/chenzhuoyu/base64x v0.0.0-20221115062448-fe3a3abad311/go.mod h1:b583jCggY9gE99b6G5LEC39OIiVsWj+R97kbl5odCEk=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
@@ -107,20 +97,11 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/fsnotify/fsnotify v1.6.0 h1:n+5WquG0fcWoWp6xPWfHdbskMCQaFnG6PfBrh1Ky4HY=
github.com/fsnotify/fsnotify v1.6.0/go.mod h1:sl3t1tCWJFWoRz9R8WJCbQihKKwmorjAbSClcnxKAGw=
github.com/gabriel-vasile/mimetype v1.4.2 h1:w5qFW6JKBz9Y393Y4q372O9A7cUSequkh1Q7OhCmWKU=
github.com/gabriel-vasile/mimetype v1.4.2/go.mod h1:zApsH/mKG4w07erKIaJPFiX0Tsq9BFQgN3qGY5GnNgA=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/gin-contrib/pprof v1.4.0 h1:XxiBSf5jWZ5i16lNOPbMTVdgHBdhfGRD5PZ1LWazzvg=
github.com/gin-contrib/pprof v1.4.0/go.mod h1:RrehPJasUVBPK6yTUwOl8/NP6i0vbUgmxtis+Z5KE90=
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/gin-gonic/gin v1.8.1/go.mod h1:ji8BvRH1azfM+SYow9zQ6SZMvR8qOMZHmsCuWR9tTTk=
github.com/gin-gonic/gin v1.9.1 h1:4idEAncQnU5cB7BeOkPtxjfCSye0AAm1R0RVIqJ+Jmg=
github.com/gin-gonic/gin v1.9.1/go.mod h1:hPrL7YrpYKXt5YId3A/Tnip5kqbEAP+KLuI3SUcPTeU=
github.com/glycerine/go-unsnap-stream v0.0.0-20180323001048-9f0cb55181dd/go.mod h1:/20jfyN9Y5QPEAprSgKAUr+glWDY39ZiUEAYOEv5dsE=
github.com/glycerine/goconvey v0.0.0-20190410193231-58a59202ab31/go.mod h1:Ogl1Tioa0aV7gstGFO7KhffUsb9M4ydbEbbxpcEDc24=
github.com/go-chi/chi/v5 v5.0.0/go.mod h1:BBug9lr0cqtdAhsu6R4AAdvufI0/XBzAQSsUqJpoZOs=
@@ -132,21 +113,11 @@ github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.1 h1:otpy5pqBCBZ1ng9RQ0dPu4PN7ba75Y/aA+UpowDyNVA=
github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KEVveWlfTs=
github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
github.com/go-playground/locales v0.14.0/go.mod h1:sawfccIbzZTqEDETgFXqTho0QybSa7l++s0DH+LDiLs=
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.0/go.mod h1:UvRDBj+xPUEGrFYl+lu/H90nyDXpg0fqeB/AQUGNTVA=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.10.0/go.mod h1:74x4gJWsvQexRdW8Pn3dXSGrTK4nAUsbPlLADvpJkos=
github.com/go-playground/validator/v10 v10.14.1 h1:9c50NUPC30zyuKprjL3vNZ0m5oG+jU0zvx4AqHGnv4k=
github.com/go-playground/validator/v10 v10.14.1/go.mod h1:9iXMNT7sEkjXb0I+enO7QXmzG6QCsPWY4zveKFVRSyU=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg=
github.com/go-sql-driver/mysql v1.7.1 h1:lUIinVbN1DY0xBg0eMOzmmtGoHwWBbvnWubQUrtU8EI=
@@ -155,9 +126,6 @@ github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/me
github.com/go-stack/stack v1.8.1 h1:ntEHSVwIt7PNXNpgPmVfMrNhLtgjlmnZha2kOpuRiDw=
github.com/go-stack/stack v1.8.1/go.mod h1:dcoOX6HbPZSZptuspn9bctJ+N/CnF5gGygcUP3XYfe4=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/goccy/go-json v0.9.7/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/goccy/go-json v0.10.2 h1:CrxCmQqYDkv1z7lO7Wbh2HN93uovUHgrECaO5ZrCXAU=
github.com/goccy/go-json v0.10.2/go.mod h1:6MelG93GURQebXPDq3khkgXZkazVtN9CRI+MGFi0w8I=
github.com/gofrs/uuid v3.3.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/gogo/protobuf v1.3.1/go.mod h1:SlYgWuQ5SjCEi6WLHjHCa1yvBfUnHcTbrrZtXPKa29o=
@@ -174,16 +142,12 @@ github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFU
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.5/go.mod h1:6O5/vntMXwX2lRkT1hjjk0nAC1IDOTvTlVgjlRvqsdk=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.3/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
@@ -199,7 +163,7 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
@@ -266,14 +230,13 @@ github.com/jmoiron/sqlx v1.3.5 h1:vFFPA71p1o5gAeqtEAwLU4dnX2napprKtHr7PYIcN3g=
github.com/jmoiron/sqlx v1.3.5/go.mod h1:nRVWtLre0KfCLJvgxzCsLVMogSvQ1zNJtpYr2Ccp0mQ=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jsternberg/zap-logfmt v1.0.0/go.mod h1:uvPs/4X51zdkcm5jXl5SYoN+4RK21K8mysFmDaM/h+o=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0 h1:U0609e9tgbseu3rBINet9P48AI/D3oJs4dN7jwJOQ1U=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/jung-kurt/gofpdf v1.0.3-0.20190309125859-24315acbbda5/go.mod h1:7Id9E/uU8ce6rXgefFLlgrJj/GYY22cpxn+r32jIOes=
github.com/jwilder/encoding v0.0.0-20170811194829-b4e1701a28ef/go.mod h1:Ct9fl0F6iIOGgxJ5npU/IUOhOhqlVrGjyIZc8/MagT0=
github.com/kisielk/errcheck v1.2.0/go.mod h1:/BMXB+zMLi60iA8Vv6Ksmxu/1UDYcXs4uQLJ+jE2L00=
@@ -282,9 +245,6 @@ github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+o
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/cpuid v0.0.0-20170728055534-ae7887de9fa5/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.5 h1:0E5MSMDEoAulmXNFquVs//DdoomxaoTY1kUhbc/qbZg=
github.com/klauspost/cpuid/v2 v2.2.5/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
github.com/klauspost/crc32 v0.0.0-20161016154125-cb6bfca970f6/go.mod h1:+ZoRqAPRLkC4NPOvfYeR5KNOrY6TD+/sAC3HXPZgDYg=
github.com/klauspost/pgzip v1.0.2-0.20170402124221-0bf5dcad4ada/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -301,9 +261,6 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/labstack/echo/v4 v4.2.1/go.mod h1:AA49e0DZ8kk5jTOOCKNuPR6oTnBS0dYiM4FW1e6jwpg=
github.com/labstack/gommon v0.3.0/go.mod h1:MULnywXg0yavhxWKc+lOruYdAhDwPK9wf0OL7NoOu+k=
github.com/leanovate/gopter v0.2.9 h1:fQjYxZaynp97ozCzfOyOuAGOU4aU/z37zf/tOujFk7c=
github.com/leodido/go-urn v1.2.1/go.mod h1:zt4jvISO2HfUBqxjfIshjdMTYS56ZS/qv49ictyFfxY=
github.com/leodido/go-urn v1.2.4 h1:XlAE/cm/ms7TE/VMVoduSpNBoyc2dOxHs5MZSwAN63Q=
github.com/leodido/go-urn v1.2.4/go.mod h1:7ZrI8mTSeBSHl/UaRyKQW1qZeMgak41ANeCNaVckg+4=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.2.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.10.9 h1:YXG7RB+JIjhP29X+OtkiDnYaXQwpS4JEWq7dtCCRUEw=
@@ -321,7 +278,6 @@ github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-isatty v0.0.16/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM=
github.com/mattn/go-isatty v0.0.19 h1:JITubQf0MOLdlGRuRq+jtsDlekdYPia9ZFsB8h/APPA=
github.com/mattn/go-isatty v0.0.19/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
@@ -335,8 +291,6 @@ github.com/mattn/go-sqlite3 v1.14.16 h1:yOQRA0RpS5PFz/oikGwBEqvAWhWg5ufRz4ETLjwp
github.com/mattn/go-sqlite3 v1.14.16/go.mod h1:2eHXhiwb8IkHr+BDWZGa96P6+rkvnG63S2DGjv9HUNg=
github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/matttproud/golang_protobuf_extensions v1.0.4 h1:mmDVorXM7PCGKw94cs5zkfA9PSy5pEvNWRP0ET0TIVo=
github.com/matttproud/golang_protobuf_extensions v1.0.4/go.mod h1:BSXmuO+STAnVfrANrmjBb36TMTDstsz7MSK+HVaYKv4=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
@@ -344,8 +298,6 @@ github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjU
github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4=
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
@@ -382,9 +334,6 @@ github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFSt
github.com/orcaman/concurrent-map v1.0.0 h1:I/2A2XPCb4IuQWcQhBhSwGfiuybl/J0ev9HDbW65HOY=
github.com/orcaman/concurrent-map v1.0.0/go.mod h1:Lu3tH6HLW3feq74c2GC+jIMS/K2CFcDWnWD9XkenwhI=
github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE=
github.com/pelletier/go-toml/v2 v2.0.1/go.mod h1:r9LEWfGN8R5k0VXJ+0BkIe7MYkRdwZOjgMj2KwnJFUo=
github.com/pelletier/go-toml/v2 v2.0.8 h1:0ctb6s9mE31h0/lhu+J6OPmVeDxJn+kYnJc2jZR9tGQ=
github.com/pelletier/go-toml/v2 v2.0.8/go.mod h1:vuYfssBdrU2XDZ9bYydBu6t+6a6PYNcZljzZR9VXg+4=
github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc=
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 h1:oYW+YCJ1pachXTQmzR3rNLYGGz4g/UgFcjb28p/viDM=
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7/go.mod h1:CRroGNssyjTd/qIG2FyxByd2S8JEAZXBl4qUrZf8GS0=
@@ -400,22 +349,14 @@ github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZb
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.14.0 h1:nJdhIvne2eSX/XRAFV9PcvFFRbrjbcTUj0VP62TMhnw=
github.com/prometheus/client_golang v1.14.0/go.mod h1:8vpkKitgIVNcqrRBWh1C4TIUQgYNtG/XQE4E/Zae36Y=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.3.0 h1:UBgGFHqYdG/TPFD1B1ogZywDqEkwp3fBMvqdiQ7Xew4=
github.com/prometheus/client_model v0.3.0/go.mod h1:LDGWKZIo7rky3hgvBe+caln+Dr3dPggB5dvjtD7w9+w=
github.com/prometheus/common v0.0.0-20181113130724-41aa239b4cce/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
github.com/prometheus/common v0.39.0 h1:oOyhkDq05hPZKItWVBkJ6g6AtGxi+fy7F4JvUV8uhsI=
github.com/prometheus/common v0.39.0/go.mod h1:6XBZ7lYdLCbkAVhwRsWTZn+IN5AB9F/NXd5w0BbEX0Y=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.9.0 h1:wzCHvIvM5SxWqYvwgVL7yJY8Lz3PKn49KQtpgMYJfhI=
github.com/prometheus/procfs v0.9.0/go.mod h1:+pB4zwohETzFnmlpe6yd2lSc+0/46IYZRB/chUwxUZY=
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc=
@@ -426,7 +367,6 @@ github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeC
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.0/go.mod h1:WmiCO8CzOY8rg0OYDC4/i/2WRWAB6poM+XZ2dLUbcbE=
github.com/rogpeppe/go-internal v1.9.0/go.mod h1:WtVeX8xhTBvf0smdhujwtBcq4Qrzq/fJaraNFVN+nFs=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog=
@@ -434,8 +374,8 @@ github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230812030736-25fe3ba69a28 h1:CECBTWhZ5NGAn8lGFB4ooRAYxZns8PXoX8kTR/14c04=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230812030736-25fe3ba69a28/go.mod h1:DiN3p2inoXOxGffxSswDKqWjQ7bU+Mp0c9v0XQXKmaA=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230804022247-26eeb40ea3ca h1:aQKsL9FiCPB6Eo3nuyxmw6aZpAhmFvJsRb7XGtXnOUc=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230804022247-26eeb40ea3ca/go.mod h1:DiN3p2inoXOxGffxSswDKqWjQ7bU+Mp0c9v0XQXKmaA=
github.com/scroll-tech/zktrie v0.6.0 h1:xLrMAO31Yo2BiPg1jtYKzcjpEFnXy8acbB7iIsyshPs=
github.com/scroll-tech/zktrie v0.6.0/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
@@ -461,12 +401,10 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.3 h1:RP3t2pwF7cMEbC1dqtB6poj3niw/9gnV4Cjg5oW5gtY=
github.com/stretchr/testify v1.8.3/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
@@ -476,14 +414,8 @@ github.com/tklauser/go-sysconf v0.3.11 h1:89WgdJhk5SNwJfu+GKyYveZ4IaJ7xAkecBo+Kd
github.com/tklauser/go-sysconf v0.3.11/go.mod h1:GqXfhXY3kiPa0nAXPDIQIWzJbMCB7AmcWpGR8lSZfqI=
github.com/tklauser/numcpus v0.6.0 h1:kebhY2Qt+3U6RNK7UqpYNA+tJ23IBEGKkB7JQBfDYms=
github.com/tklauser/numcpus v0.6.0/go.mod h1:FEZLMke0lhOUG6w2JadTzp0a+Nl8PF/GFkQ5UVIcaL4=
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
github.com/tyler-smith/go-bip39 v1.1.0 h1:5eUemwrMargf3BSLRRCalXT93Ns6pQJIjYQN2nyfOP8=
github.com/tyler-smith/go-bip39 v1.1.0/go.mod h1:gUYDtqQw1JS3ZJ8UWVcGTGqqr6YIN3CWg+kkNaLt55U=
github.com/ugorji/go v1.2.7/go.mod h1:nF9osbDWLy6bDVv/Rtoh6QgnvNDpmCalQV5urGCCS6M=
github.com/ugorji/go/codec v1.2.7/go.mod h1:WGN1fab3R1fzQlVQTkfxVtIBhWDRqOviHU95kRgeqEY=
github.com/ugorji/go/codec v1.2.11 h1:BMaWp1Bb6fHwEtbplGBGJ498wD+LKlNSl25MjdZY4dU=
github.com/ugorji/go/codec v1.2.11/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
github.com/urfave/cli/v2 v2.25.7 h1:VAzn5oq403l5pHjc4OhD54+XGO9cdKVL/7lDjF+iKUs=
github.com/urfave/cli/v2 v2.25.7/go.mod h1:8qnjx1vcq5s2/wpsqoZFndg2CE5tNFyrTvS6SinrnYQ=
github.com/valyala/bytebufferpool v1.0.0/go.mod h1:6bBcMArwyJ5K/AmCkWv1jt77kVWyCJ6HpOuEn7z0Csc=
@@ -504,9 +436,6 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/arch v0.0.0-20210923205945-b76863e36670/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/arch v0.4.0 h1:A8WCeEWhLwPBKNbFi5Wv5UTCBx5zzubnXDlMOFAzFMc=
golang.org/x/arch v0.4.0/go.mod h1:5om86z9Hs0C8fWVUuoMHwpExlXzs5Tkyp9hOrfG7pp8=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
@@ -516,11 +445,10 @@ golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210711020723-a769d52b0f97/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.6.0/go.mod h1:OFC/31mSvZgRz0V1QTNCzfAI1aIRzbiufJtkMIlEp58=
golang.org/x/crypto v0.12.0 h1:tFM/ta59kqch6LlvYnPa0yx5a83cL2nHflFhYKvv9Yk=
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
golang.org/x/crypto v0.11.0 h1:6Ewdq3tDic1mg5xRO4milcWCfMVQhI4NkqWWvqejpuA=
golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIio=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -632,8 +560,6 @@ golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220811171246-fbc7d0a398ab/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
@@ -641,8 +567,8 @@ golang.org/x/sys v0.0.0-20220908164124-27713097b956/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0 h1:eG7RXZHdqOJ1i+0lgLgCpSXAp6M3LYlAo6osgSi0xOM=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.10.0 h1:SqMFp9UcQJZa+pmYuAKjd9xq1f0j5rLcDIk0mj4qAsA=
golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -652,11 +578,10 @@ golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.12.0 h1:k+n5B8goJNdU7hSvEtMUz3d1Q6D/XW4COJSJR6fN0mc=
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.11.0 h1:LAntKIrcmeSKERyiOh0XMV39LXS8IE9UL2yP7+f5ij4=
golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -743,11 +668,6 @@ google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQ
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.31.0 h1:g0LDEJHgrBl9N9r17Ru3sqWhkIx2NB67okBHPwC7hs8=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -768,7 +688,6 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gorm.io/driver/postgres v1.5.0 h1:u2FXTy14l45qc3UeCJ7QaAXZmZfDDv0YrthvmRq1l0U=

View File

@@ -32,14 +32,13 @@ dependencies = [
[[package]]
name = "aggregator"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.3#594b237563a275b4984631882400adb5372164f7"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?branch=develop#2855c13b5d3e6ec4056f823f56a33bf25d0080bb"
dependencies = [
"ark-std",
"env_logger 0.10.0",
"eth-types",
"ethers-core",
"halo2_proofs",
"hex",
"itertools",
"log",
"rand",
@@ -433,7 +432,7 @@ checksum = "a3e2c3daef883ecc1b5d58c15adae93470a91d425f3532ba1695849656af3fc1"
[[package]]
name = "bus-mapping"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.3#594b237563a275b4984631882400adb5372164f7"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?branch=develop#2855c13b5d3e6ec4056f823f56a33bf25d0080bb"
dependencies = [
"eth-types",
"ethers-core",
@@ -631,12 +630,9 @@ checksum = "6245d59a3e82a7fc217c5828a6692dbc6dfb63a0c8c90495621f7b9d79704a0e"
[[package]]
name = "convert_case"
version = "0.6.0"
version = "0.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec182b0ca2f35d8fc196cf3404988fd8b8c739a4d270ff118a398feb0cbec1ca"
dependencies = [
"unicode-segmentation",
]
checksum = "fb4a24b1aaf0fd0ce8b45161144d6f42cd91677fd5940fd431183eb023b3a2b8"
[[package]]
name = "core-foundation-sys"
@@ -1049,7 +1045,7 @@ dependencies = [
[[package]]
name = "eth-types"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.3#594b237563a275b4984631882400adb5372164f7"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?branch=develop#2855c13b5d3e6ec4056f823f56a33bf25d0080bb"
dependencies = [
"ethers-core",
"ethers-signers",
@@ -1142,12 +1138,13 @@ dependencies = [
[[package]]
name = "ethers-core"
version = "0.17.0"
source = "git+https://github.com/scroll-tech/ethers-rs.git?branch=v0.17.0#739ec9a0df8daf536937739c87e85612bd73212f"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0ebdd63c828f58aa067f40f9adcbea5e114fb1f90144b3a1e2858e0c9b1ff4e8"
dependencies = [
"arrayvec",
"bytes",
"chrono",
"convert_case 0.6.0",
"convert_case 0.5.0",
"elliptic-curve",
"ethabi",
"fastrlp",
@@ -1226,7 +1223,7 @@ dependencies = [
[[package]]
name = "external-tracer"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.3#594b237563a275b4984631882400adb5372164f7"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?branch=develop#2855c13b5d3e6ec4056f823f56a33bf25d0080bb"
dependencies = [
"eth-types",
"geth-utils",
@@ -1439,7 +1436,7 @@ dependencies = [
[[package]]
name = "gadgets"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.3#594b237563a275b4984631882400adb5372164f7"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?branch=develop#2855c13b5d3e6ec4056f823f56a33bf25d0080bb"
dependencies = [
"digest 0.7.6",
"eth-types",
@@ -1479,7 +1476,7 @@ dependencies = [
[[package]]
name = "geth-utils"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.3#594b237563a275b4984631882400adb5372164f7"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?branch=develop#2855c13b5d3e6ec4056f823f56a33bf25d0080bb"
dependencies = [
"env_logger 0.9.3",
"gobuild 0.1.0-alpha.2 (git+https://github.com/scroll-tech/gobuild.git)",
@@ -1583,7 +1580,7 @@ dependencies = [
[[package]]
name = "halo2-base"
version = "0.2.2"
source = "git+https://github.com/scroll-tech/halo2-lib?tag=v0.1.0#2c225864227e74b207d9f4b9e08c4d5f1afc69a1"
source = "git+https://github.com/scroll-tech/halo2-lib?branch=develop#2c225864227e74b207d9f4b9e08c4d5f1afc69a1"
dependencies = [
"ff",
"halo2_proofs",
@@ -1598,7 +1595,7 @@ dependencies = [
[[package]]
name = "halo2-ecc"
version = "0.2.2"
source = "git+https://github.com/scroll-tech/halo2-lib?tag=v0.1.0#2c225864227e74b207d9f4b9e08c4d5f1afc69a1"
source = "git+https://github.com/scroll-tech/halo2-lib?branch=develop#2c225864227e74b207d9f4b9e08c4d5f1afc69a1"
dependencies = [
"ff",
"group",
@@ -1633,7 +1630,7 @@ dependencies = [
[[package]]
name = "halo2-mpt-circuits"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/mpt-circuit.git?tag=v0.5.1#2163a9c436ed85363c954ecf7e6e1044a1b991dc"
source = "git+https://github.com/scroll-tech/mpt-circuit.git?branch=v0.5#1c11b6c9b1245073a76c3ce7100b6798060f7cb8"
dependencies = [
"ethers-core",
"halo2_proofs",
@@ -1655,7 +1652,7 @@ dependencies = [
[[package]]
name = "halo2_proofs"
version = "0.2.0"
source = "git+https://github.com/scroll-tech/halo2.git?branch=develop#19de67c07a9b9b567580466763f93ebfbc3bb799"
source = "git+https://github.com/scroll-tech/halo2.git?branch=develop#4d8a405a97ce445c88fc8ec40f9a2dd0a661c697"
dependencies = [
"ark-std",
"blake2b_simd",
@@ -2077,7 +2074,7 @@ dependencies = [
[[package]]
name = "keccak256"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.3#594b237563a275b4984631882400adb5372164f7"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?branch=develop#2855c13b5d3e6ec4056f823f56a33bf25d0080bb"
dependencies = [
"env_logger 0.9.3",
"eth-types",
@@ -2264,7 +2261,7 @@ dependencies = [
[[package]]
name = "mock"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.3#594b237563a275b4984631882400adb5372164f7"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?branch=develop#2855c13b5d3e6ec4056f823f56a33bf25d0080bb"
dependencies = [
"eth-types",
"ethers-core",
@@ -2279,7 +2276,7 @@ dependencies = [
[[package]]
name = "mpt-zktrie"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.3#594b237563a275b4984631882400adb5372164f7"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?branch=develop#2855c13b5d3e6ec4056f823f56a33bf25d0080bb"
dependencies = [
"bus-mapping",
"eth-types",
@@ -2754,8 +2751,8 @@ dependencies = [
[[package]]
name = "prover"
version = "0.7.3"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.7.3#1b1fb34f8662dbb11af12c8dfd72f8da5ae80422"
version = "0.4.0"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.5.1#15aac6e1484a42f723098fbc9d8783f374e7e90a"
dependencies = [
"aggregator",
"anyhow",
@@ -3624,7 +3621,7 @@ checksum = "62bb4feee49fdd9f707ef802e22365a35de4b7b299de4763d44bfea899442ff9"
[[package]]
name = "snark-verifier"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/snark-verifier?tag=v0.1.2#4466059ce9a6dfaf26455e4ffb61d72af775cf52"
source = "git+https://github.com/scroll-tech/snark-verifier?branch=develop#f8bdcbee60348e5c996c04f19ff30522e6b276b0"
dependencies = [
"bytes",
"ethereum-types 0.14.1",
@@ -3648,7 +3645,7 @@ dependencies = [
[[package]]
name = "snark-verifier-sdk"
version = "0.0.1"
source = "git+https://github.com/scroll-tech/snark-verifier?tag=v0.1.2#4466059ce9a6dfaf26455e4ffb61d72af775cf52"
source = "git+https://github.com/scroll-tech/snark-verifier?branch=develop#f8bdcbee60348e5c996c04f19ff30522e6b276b0"
dependencies = [
"bincode",
"env_logger 0.10.0",
@@ -4039,8 +4036,8 @@ checksum = "497961ef93d974e23eb6f433eb5fe1b7930b659f06d12dec6fc44a8f554c0bba"
[[package]]
name = "types"
version = "0.7.3"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.7.3#1b1fb34f8662dbb11af12c8dfd72f8da5ae80422"
version = "0.4.0"
source = "git+https://github.com/scroll-tech/scroll-prover?tag=v0.5.1#15aac6e1484a42f723098fbc9d8783f374e7e90a"
dependencies = [
"base64 0.13.1",
"blake2",
@@ -4085,12 +4082,6 @@ dependencies = [
"tinyvec",
]
[[package]]
name = "unicode-segmentation"
version = "1.10.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1dd624098567895118886609431a7c3b8f516e41d30e0643f03d94592a147e36"
[[package]]
name = "unicode-xid"
version = "0.2.4"
@@ -4491,11 +4482,10 @@ checksum = "2a0956f1ba7c7909bfb66c2e9e4124ab6f6482560f6628b5aaeba39207c9aad9"
[[package]]
name = "zkevm-circuits"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?tag=v0.7.3#594b237563a275b4984631882400adb5372164f7"
source = "git+https://github.com/scroll-tech/zkevm-circuits.git?branch=develop#2855c13b5d3e6ec4056f823f56a33bf25d0080bb"
dependencies = [
"array-init",
"bus-mapping",
"either",
"env_logger 0.9.3",
"eth-types",
"ethers-core",
@@ -4536,7 +4526,6 @@ dependencies = [
name = "zkp"
version = "0.1.0"
dependencies = [
"base64 0.13.1",
"env_logger 0.9.3",
"halo2_proofs",
"libc",

View File

@@ -7,8 +7,6 @@ edition = "2021"
[lib]
crate-type = ["cdylib"]
[patch.crates-io]
ethers-core = { git = "https://github.com/scroll-tech/ethers-rs.git", branch = "v0.17.0" }
[patch."https://github.com/privacy-scaling-explorations/halo2.git"]
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "develop" }
[patch."https://github.com/privacy-scaling-explorations/poseidon.git"]
@@ -20,21 +18,23 @@ maingate = { git = "https://github.com/scroll-tech/halo2wrong", branch = "halo2-
halo2curves = { git = "https://github.com/scroll-tech/halo2curves.git", branch = "0.3.1-derive-serde" }
[dependencies]
prover = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.7.3" }
types = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.7.3" }
prover = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.5.1" }
types = { git = "https://github.com/scroll-tech/scroll-prover", tag = "v0.5.1" }
halo2_proofs = { git = "https://github.com/scroll-tech/halo2.git", branch = "develop" }
base64 = "0.13.0"
env_logger = "0.9.0"
libc = "0.2"
log = "0.4"
once_cell = "1.8.0"
env_logger = "0.9.0"
serde = "1.0"
serde_derive = "1.0"
serde_json = "1.0.66"
libc = "0.2"
once_cell = "1.8.0"
[profile.test]
opt-level = 3
# debug-assertions = true
[profile.release]
opt-level = 3
# debug-assertions = true

View File

@@ -1,11 +1,11 @@
use crate::utils::{c_char_to_str, c_char_to_vec, string_to_c_char, vec_to_c_char, OUTPUT_DIR};
use crate::utils::{c_char_to_str, c_char_to_vec, vec_to_c_char, OUTPUT_DIR};
use libc::c_char;
use prover::{
aggregator::{Prover, Verifier},
utils::{chunk_trace_to_witness_block, init_env_and_log},
BatchProof, ChunkHash, ChunkProof,
};
use std::{cell::OnceCell, env, panic, ptr::null};
use std::{cell::OnceCell, panic, ptr::null};
use types::eth::BlockTrace;
static mut PROVER: OnceCell<Prover> = OnceCell::new();
@@ -13,15 +13,11 @@ static mut VERIFIER: OnceCell<Verifier> = OnceCell::new();
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn init_batch_prover(params_dir: *const c_char, assets_dir: *const c_char) {
pub unsafe extern "C" fn init_batch_prover(params_dir: *const c_char) {
init_env_and_log("ffi_batch_prove");
let params_dir = c_char_to_str(params_dir);
let assets_dir = c_char_to_str(assets_dir);
// TODO: add a settings in scroll-prover.
env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir);
let prover = Prover::from_dirs(params_dir, assets_dir);
let prover = Prover::from_params_dir(params_dir);
PROVER.set(prover).unwrap();
}
@@ -34,35 +30,11 @@ pub unsafe extern "C" fn init_batch_verifier(params_dir: *const c_char, assets_d
let params_dir = c_char_to_str(params_dir);
let assets_dir = c_char_to_str(assets_dir);
// TODO: add a settings in scroll-prover.
env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir);
let verifier = Verifier::from_dirs(params_dir, assets_dir);
VERIFIER.set(verifier).unwrap();
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn get_batch_vk() -> *const c_char {
let vk_result = panic::catch_unwind(|| PROVER.get_mut().unwrap().get_vk());
vk_result
.ok()
.flatten()
.map_or(null(), |vk| string_to_c_char(base64::encode(vk)))
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn check_chunk_proofs(chunk_proofs: *const c_char) -> c_char {
let chunk_proofs = c_char_to_vec(chunk_proofs);
let chunk_proofs = serde_json::from_slice::<Vec<ChunkProof>>(&chunk_proofs).unwrap();
assert!(!chunk_proofs.is_empty());
let valid = panic::catch_unwind(|| PROVER.get().unwrap().check_chunk_proofs(&chunk_proofs));
valid.unwrap_or(false) as c_char
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn gen_batch_proof(

View File

@@ -1,11 +1,11 @@
use crate::utils::{c_char_to_str, c_char_to_vec, string_to_c_char, vec_to_c_char, OUTPUT_DIR};
use crate::utils::{c_char_to_str, c_char_to_vec, vec_to_c_char, OUTPUT_DIR};
use libc::c_char;
use prover::{
utils::init_env_and_log,
zkevm::{Prover, Verifier},
ChunkProof,
};
use std::{cell::OnceCell, env, panic, ptr::null};
use std::{cell::OnceCell, panic, ptr::null};
use types::eth::BlockTrace;
static mut PROVER: OnceCell<Prover> = OnceCell::new();
@@ -13,14 +13,10 @@ static mut VERIFIER: OnceCell<Verifier> = OnceCell::new();
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn init_chunk_prover(params_dir: *const c_char, assets_dir: *const c_char) {
pub unsafe extern "C" fn init_chunk_prover(params_dir: *const c_char) {
init_env_and_log("ffi_chunk_prove");
let params_dir = c_char_to_str(params_dir);
let assets_dir = c_char_to_str(assets_dir);
// TODO: add a setting in scroll-prover.
env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir);
let prover = Prover::from_params_dir(params_dir);
PROVER.set(prover).unwrap();
@@ -34,24 +30,11 @@ pub unsafe extern "C" fn init_chunk_verifier(params_dir: *const c_char, assets_d
let params_dir = c_char_to_str(params_dir);
let assets_dir = c_char_to_str(assets_dir);
// TODO: add a setting in scroll-prover.
env::set_var("SCROLL_PROVER_ASSETS_DIR", assets_dir);
let verifier = Verifier::from_dirs(params_dir, assets_dir);
VERIFIER.set(verifier).unwrap();
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn get_chunk_vk() -> *const c_char {
let vk_result = panic::catch_unwind(|| PROVER.get_mut().unwrap().get_vk());
vk_result
.ok()
.flatten()
.map_or(null(), |vk| string_to_c_char(base64::encode(vk)))
}
/// # Safety
#[no_mangle]
pub unsafe extern "C" fn gen_chunk_proof(block_traces: *const c_char) -> *const c_char {

View File

@@ -19,10 +19,6 @@ pub(crate) fn c_char_to_vec(c: *const c_char) -> Vec<u8> {
cstr.to_bytes().to_vec()
}
pub(crate) fn string_to_c_char(string: String) -> *const c_char {
CString::new(string).unwrap().into_raw()
}
pub(crate) fn vec_to_c_char(bytes: Vec<u8>) -> *const c_char {
CString::new(bytes).unwrap().into_raw()
}

View File

@@ -1,13 +1,10 @@
void init_batch_prover(char* params_dir, char* assets_dir);
void init_batch_prover(char* params_dir);
void init_batch_verifier(char* params_dir, char* assets_dir);
char* get_batch_vk();
char check_chunk_proofs(char* chunk_proofs);
char* gen_batch_proof(char* chunk_hashes, char* chunk_proofs);
char verify_batch_proof(char* proof);
void init_chunk_prover(char* params_dir, char* assets_dir);
void init_chunk_prover(char* params_dir);
void init_chunk_verifier(char* params_dir, char* assets_dir);
char* get_chunk_vk();
char* gen_chunk_proof(char* block_traces);
char verify_chunk_proof(char* proof);

View File

@@ -1,57 +0,0 @@
package ginmetrics
import (
"github.com/bits-and-blooms/bitset"
)
const defaultSize = 2 << 24
var seeds = []uint{7, 11, 13, 31, 37, 61}
// BloomFilter a simple bloom filter
type BloomFilter struct {
Set *bitset.BitSet
Funcs [6]simpleHash
}
// NewBloomFilter new a BloomFilter
func NewBloomFilter() *BloomFilter {
bf := new(BloomFilter)
for i := 0; i < len(bf.Funcs); i++ {
bf.Funcs[i] = simpleHash{defaultSize, seeds[i]}
}
bf.Set = bitset.New(defaultSize)
return bf
}
// Add a value to BloomFilter
func (bf *BloomFilter) Add(value string) {
for _, f := range bf.Funcs {
bf.Set.Set(f.hash(value))
}
}
// Contains check the value is in bloom filter
func (bf *BloomFilter) Contains(value string) bool {
if value == "" {
return false
}
ret := true
for _, f := range bf.Funcs {
ret = ret && bf.Set.Test(f.hash(value))
}
return ret
}
type simpleHash struct {
Cap uint
Seed uint
}
func (s *simpleHash) hash(value string) uint {
var result uint = 0
for i := 0; i < len(value); i++ {
result = result*s.Seed + uint(value[i])
}
return (s.Cap - 1) & result
}
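
For orientation, a minimal usage sketch of the bloom filter deleted above (the middleware below uses it to deduplicate client IPs for the request_uv_total metric). The import path is assumed from the repo layout; the IPs are illustrative:

```go
package main

import (
	"fmt"

	"scroll-tech/common/metrics/ginmetrics" // assumed import path of the deleted package
)

func main() {
	bf := ginmetrics.NewBloomFilter()

	// Record two client IPs; Contains reports whether a value was (probably) seen before.
	bf.Add("10.0.0.1")
	bf.Add("10.0.0.2")

	fmt.Println(bf.Contains("10.0.0.1")) // true
	fmt.Println(bf.Contains("10.0.0.3")) // false, with high probability
	fmt.Println(bf.Contains(""))         // false: empty values are never reported as present
}
```

The filter trades accuracy for memory: an unseen IP can occasionally collide and be treated as already counted, but raw IPs never need to be stored.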

View File

@@ -1,89 +0,0 @@
package ginmetrics
import (
"fmt"
"github.com/prometheus/client_golang/prometheus"
)
// Metric defines a metric object. Users can use it to save
// metric data. Every metric should be globally unique by name.
type Metric struct {
Type MetricType
Name string
Description string
Labels []string
Buckets []float64
Objectives map[float64]float64
vec prometheus.Collector
}
// SetGaugeValue set data for Gauge type Metric.
func (m *Metric) SetGaugeValue(labelValues []string, value float64) error {
if m.Type == None {
return fmt.Errorf("metric %s not existed", m.Name)
}
if m.Type != Gauge {
return fmt.Errorf("metric %s not Gauge type", m.Name)
}
m.vec.(*prometheus.GaugeVec).WithLabelValues(labelValues...).Set(value)
return nil
}
// Inc increases value for Counter/Gauge type metric, increments
// the counter by 1
func (m *Metric) Inc(labelValues []string) error {
if m.Type == None {
return fmt.Errorf("metric %s not existed", m.Name)
}
if m.Type != Gauge && m.Type != Counter {
return fmt.Errorf("metric %s not Gauge or Counter type", m.Name)
}
switch m.Type {
case Counter:
m.vec.(*prometheus.CounterVec).WithLabelValues(labelValues...).Inc()
case Gauge:
m.vec.(*prometheus.GaugeVec).WithLabelValues(labelValues...).Inc()
}
return nil
}
// Add adds the given value to the Metric object. Only
// for Counter/Gauge type metric.
func (m *Metric) Add(labelValues []string, value float64) error {
if m.Type == None {
return fmt.Errorf("metric %s not existed", m.Name)
}
if m.Type != Gauge && m.Type != Counter {
return fmt.Errorf("metric %s not Gauge or Counter type", m.Name)
}
switch m.Type {
case Counter:
m.vec.(*prometheus.CounterVec).WithLabelValues(labelValues...).Add(value)
case Gauge:
m.vec.(*prometheus.GaugeVec).WithLabelValues(labelValues...).Add(value)
}
return nil
}
// Observe is used by Histogram and Summary type metric to
// add observations.
func (m *Metric) Observe(labelValues []string, value float64) error {
if m.Type == 0 {
return fmt.Errorf("metric %s not existed", m.Name)
}
if m.Type != Histogram && m.Type != Summary {
return fmt.Errorf("metric %s not Histogram or Summary type", m.Name)
}
switch m.Type {
case Histogram:
m.vec.(*prometheus.HistogramVec).WithLabelValues(labelValues...).Observe(value)
case Summary:
m.vec.(*prometheus.SummaryVec).WithLabelValues(labelValues...).Observe(value)
}
return nil
}

View File

@@ -1,155 +0,0 @@
package ginmetrics
import (
"fmt"
"strconv"
"time"
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus/promhttp"
)
var (
metricRequestTotal = "request_total"
metricRequestUVTotal = "request_uv_total"
metricURIRequestTotal = "uri_request_total"
metricRequestBody = "request_body_total"
metricResponseBody = "response_body_total"
metricRequestDuration = "request_duration"
metricSlowRequest = "slow_request_total"
bloomFilter *BloomFilter
)
// Use set gin metrics middleware
func (m *Monitor) Use(r gin.IRoutes) {
m.initGinMetrics()
r.Use(m.monitorInterceptor)
r.GET(m.metricPath, func(ctx *gin.Context) {
promhttp.Handler().ServeHTTP(ctx.Writer, ctx.Request)
})
}
// UseWithoutExposingEndpoint is used to add monitor interceptor to gin router
// It can be called multiple times to intercept from multiple gin.IRoutes
// http path is not set, to do that use Expose function
func (m *Monitor) UseWithoutExposingEndpoint(r gin.IRoutes) {
m.initGinMetrics()
r.Use(m.monitorInterceptor)
}
// Expose adds metric path to a given router.
// The router can be different with the one passed to UseWithoutExposingEndpoint.
// This allows to expose metrics on different port.
func (m *Monitor) Expose(r gin.IRoutes) {
r.GET(m.metricPath, func(ctx *gin.Context) {
promhttp.Handler().ServeHTTP(ctx.Writer, ctx.Request)
})
}
// initGinMetrics used to init gin metrics
func (m *Monitor) initGinMetrics() {
bloomFilter = NewBloomFilter()
_ = monitor.AddMetric(&Metric{
Type: Counter,
Name: metricRequestTotal,
Description: "all the server received request num.",
Labels: nil,
})
_ = monitor.AddMetric(&Metric{
Type: Counter,
Name: metricRequestUVTotal,
Description: "all the server received ip num.",
Labels: nil,
})
_ = monitor.AddMetric(&Metric{
Type: Counter,
Name: metricURIRequestTotal,
Description: "all the server received request num with every uri.",
Labels: []string{"uri", "method", "code"},
})
_ = monitor.AddMetric(&Metric{
Type: Counter,
Name: metricRequestBody,
Description: "the server received request body size, unit byte",
Labels: nil,
})
_ = monitor.AddMetric(&Metric{
Type: Counter,
Name: metricResponseBody,
Description: "the server send response body size, unit byte",
Labels: nil,
})
_ = monitor.AddMetric(&Metric{
Type: Histogram,
Name: metricRequestDuration,
Description: "the time server took to handle the request.",
Labels: []string{"uri"},
Buckets: m.reqDuration,
})
_ = monitor.AddMetric(&Metric{
Type: Counter,
Name: metricSlowRequest,
Description: fmt.Sprintf("the server handled slow requests counter, t=%d.", m.slowTime),
Labels: []string{"uri", "method", "code"},
})
}
// monitorInterceptor as gin monitor middleware.
func (m *Monitor) monitorInterceptor(ctx *gin.Context) {
if ctx.Request.URL.Path == m.metricPath {
ctx.Next()
return
}
startTime := time.Now()
// execute normal process.
ctx.Next()
// after request
m.ginMetricHandle(ctx, startTime)
}
func (m *Monitor) ginMetricHandle(ctx *gin.Context, start time.Time) {
r := ctx.Request
w := ctx.Writer
//set request total
_ = m.GetMetric(metricRequestTotal).Inc(nil)
// set uv
if clientIP := ctx.ClientIP(); !bloomFilter.Contains(clientIP) {
bloomFilter.Add(clientIP)
_ = m.GetMetric(metricRequestUVTotal).Inc(nil)
}
errCode := strconv.Itoa(ctx.GetInt("errcode"))
if len(errCode) == 0 {
errCode = strconv.Itoa(w.Status())
}
// set uri request total
_ = m.GetMetric(metricURIRequestTotal).Inc([]string{ctx.FullPath(), r.Method, errCode})
// set request body size
// since r.ContentLength can be negative (in some occasions) guard the operation
if r.ContentLength >= 0 {
_ = m.GetMetric(metricRequestBody).Add(nil, float64(r.ContentLength))
}
// set slow request
latency := time.Since(start)
if int32(latency.Seconds()) > m.slowTime {
_ = m.GetMetric(metricSlowRequest).Inc([]string{ctx.FullPath(), r.Method, strconv.Itoa(w.Status())})
}
// set request duration
_ = m.GetMetric(metricRequestDuration).Observe([]string{ctx.FullPath()}, latency.Seconds())
// set response size
if w.Size() > 0 {
_ = m.GetMetric(metricResponseBody).Add(nil, float64(w.Size()))
}
}
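
A hedged sketch of how this deleted middleware was typically wired, based only on the API shown above (GetMonitor, SetMetricPath, UseWithoutExposingEndpoint, Expose). Import paths and ports are illustrative:

```go
package main

import (
	"github.com/gin-gonic/gin"
	"github.com/prometheus/client_golang/prometheus"

	"scroll-tech/common/metrics/ginmetrics" // assumed import path of the deleted package
)

func main() {
	m := ginmetrics.GetMonitor(prometheus.DefaultRegisterer)
	m.SetMetricPath("/metrics")
	m.SetSlowTime(1) // requests slower than 1s count towards slow_request_total

	// Business router: only the interceptor is attached, /metrics is not exposed here.
	api := gin.New()
	m.UseWithoutExposingEndpoint(api)
	api.GET("/ping", func(c *gin.Context) { c.String(200, "pong") })

	// Separate router, e.g. bound to an internal port, serves the Prometheus endpoint.
	internal := gin.New()
	m.Expose(internal)

	go func() { _ = internal.Run(":9090") }()
	_ = api.Run(":8080")
}
```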

View File

@@ -1,158 +0,0 @@
package ginmetrics
import (
"errors"
"fmt"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promauto"
)
// MetricType define metric type
type MetricType int
const (
// None unknown metric type
None MetricType = iota
// Counter MetricType
Counter
// Gauge MetricType
Gauge
// Histogram MetricType
Histogram
// Summary MetricType
Summary
defaultMetricPath = "/debug/metrics"
defaultSlowTime = int32(5)
)
var (
defaultDuration = []float64{0.1, 0.3, 1.2, 5, 10}
monitor *Monitor
promTypeHandler = map[MetricType]func(metric *Metric, reg prometheus.Registerer){
Counter: counterHandler,
Gauge: gaugeHandler,
Histogram: histogramHandler,
Summary: summaryHandler,
}
)
// Monitor is an object that uses to set gin server monitor.
type Monitor struct {
slowTime int32
metricPath string
reqDuration []float64
metrics map[string]*Metric
register prometheus.Registerer
}
// GetMonitor used to get global Monitor object,
// this function returns a singleton object.
func GetMonitor(reg prometheus.Registerer) *Monitor {
if monitor == nil {
monitor = &Monitor{
metricPath: defaultMetricPath,
slowTime: defaultSlowTime,
reqDuration: defaultDuration,
metrics: make(map[string]*Metric),
register: reg,
}
}
return monitor
}
// GetMetric used to get metric object by metric_name.
func (m *Monitor) GetMetric(name string) *Metric {
if metric, ok := m.metrics[name]; ok {
return metric
}
return &Metric{}
}
// SetMetricPath set metricPath property. metricPath is used for Prometheus
// to get gin server monitoring data.
func (m *Monitor) SetMetricPath(path string) {
m.metricPath = path
}
// SetSlowTime set slowTime property. slowTime is used to determine whether
// the request is slow. For "gin_slow_request_total" metric.
func (m *Monitor) SetSlowTime(slowTime int32) {
m.slowTime = slowTime
}
// SetDuration set reqDuration property. reqDuration is used to ginRequestDuration
// metric buckets.
func (m *Monitor) SetDuration(duration []float64) {
m.reqDuration = duration
}
// SetMetricPrefix set the metric prefix
func (m *Monitor) SetMetricPrefix(prefix string) {
metricRequestTotal = prefix + metricRequestTotal
metricRequestUVTotal = prefix + metricRequestUVTotal
metricURIRequestTotal = prefix + metricURIRequestTotal
metricRequestBody = prefix + metricRequestBody
metricResponseBody = prefix + metricResponseBody
metricRequestDuration = prefix + metricRequestDuration
metricSlowRequest = prefix + metricSlowRequest
}
// SetMetricSuffix set the metric suffix
func (m *Monitor) SetMetricSuffix(suffix string) {
metricRequestTotal += suffix
metricRequestUVTotal += suffix
metricURIRequestTotal += suffix
metricRequestBody += suffix
metricResponseBody += suffix
metricRequestDuration += suffix
metricSlowRequest += suffix
}
// AddMetric add custom monitor metric.
func (m *Monitor) AddMetric(metric *Metric) error {
if _, ok := m.metrics[metric.Name]; ok {
return fmt.Errorf("metric %s is existed", metric.Name)
}
if metric.Name == "" {
return errors.New("metric name cannot be empty")
}
if f, ok := promTypeHandler[metric.Type]; ok {
f(metric, m.register)
m.metrics[metric.Name] = metric
}
return nil
}
func counterHandler(metric *Metric, register prometheus.Registerer) {
metric.vec = promauto.With(register).NewCounterVec(
prometheus.CounterOpts{Name: metric.Name, Help: metric.Description},
metric.Labels,
)
}
func gaugeHandler(metric *Metric, register prometheus.Registerer) {
metric.vec = promauto.With(register).NewGaugeVec(
prometheus.GaugeOpts{Name: metric.Name, Help: metric.Description},
metric.Labels,
)
}
func histogramHandler(metric *Metric, register prometheus.Registerer) {
metric.vec = promauto.With(register).NewHistogramVec(
prometheus.HistogramOpts{Name: metric.Name, Help: metric.Description, Buckets: metric.Buckets},
metric.Labels,
)
}
func summaryHandler(metric *Metric, register prometheus.Registerer) {
promauto.With(register).NewSummaryVec(
prometheus.SummaryOpts{Name: metric.Name, Help: metric.Description, Objectives: metric.Objectives},
metric.Labels,
)
}
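
Registering a custom metric goes through the same singleton. A small sketch under the same import-path assumption; the metric name and labels are made up:

```go
package main

import (
	"log"

	"github.com/prometheus/client_golang/prometheus"

	"scroll-tech/common/metrics/ginmetrics" // assumed import path of the deleted package
)

func main() {
	m := ginmetrics.GetMonitor(prometheus.NewRegistry())

	// AddMetric wires the underlying prometheus vector for the declared type.
	if err := m.AddMetric(&ginmetrics.Metric{
		Type:        ginmetrics.Counter,
		Name:        "task_processed_total", // illustrative name
		Description: "number of processed tasks, labelled by result",
		Labels:      []string{"result"},
	}); err != nil {
		log.Fatal(err)
	}

	// GetMetric looks a metric up by name; unknown names return a zero-value Metric
	// whose methods report an error instead of panicking.
	_ = m.GetMetric("task_processed_total").Inc([]string{"ok"})
	_ = m.GetMetric("task_processed_total").Add([]string{"failed"}, 3)
}
```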

common/metrics/metrics.go (new file, 57 lines)
View File

@@ -0,0 +1,57 @@
package metrics
import (
"context"
"net"
"net/http"
"strconv"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/metrics"
"github.com/scroll-tech/go-ethereum/metrics/prometheus"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/urfave/cli/v2"
"scroll-tech/common/utils"
)
var (
// ScrollRegistry is used for scroll metrics.
ScrollRegistry = metrics.NewRegistry()
)
// Serve starts the metrics server on the given address, will be closed when the given
// context is canceled.
func Serve(ctx context.Context, c *cli.Context) {
if !c.Bool(utils.MetricsEnabled.Name) {
return
}
address := net.JoinHostPort(
c.String(utils.MetricsAddr.Name),
strconv.Itoa(c.Int(utils.MetricsPort.Name)),
)
server := &http.Server{
Addr: address,
Handler: prometheus.Handler(ScrollRegistry),
ReadTimeout: rpc.DefaultHTTPTimeouts.ReadTimeout,
WriteTimeout: rpc.DefaultHTTPTimeouts.WriteTimeout,
IdleTimeout: rpc.DefaultHTTPTimeouts.IdleTimeout,
}
go func() {
<-ctx.Done()
if err := server.Close(); err != nil {
log.Error("Failed to close metrics server", "error", err)
}
}()
log.Info("Starting metrics server", "address", address)
go func() {
if err := server.ListenAndServe(); err != nil {
log.Error("start metrics server error", "error", err)
}
}()
}
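
This new file replaces the gin-based setup with a plain HTTP server over geth's metrics registry. A minimal sketch of calling it from a service, assuming the MetricsEnabled/MetricsAddr/MetricsPort flags exported by scroll-tech/common/utils (referenced in the code above) are cli.Flag values; the app name is illustrative:

```go
package main

import (
	"context"
	"os"
	"os/signal"

	"github.com/scroll-tech/go-ethereum/log"
	"github.com/urfave/cli/v2"

	"scroll-tech/common/metrics"
	"scroll-tech/common/utils"
)

func main() {
	app := &cli.App{
		Name:  "example-service", // illustrative
		Flags: []cli.Flag{utils.MetricsEnabled, utils.MetricsAddr, utils.MetricsPort},
		Action: func(c *cli.Context) error {
			ctx, cancel := signal.NotifyContext(context.Background(), os.Interrupt)
			defer cancel()

			// Starts the Prometheus endpoint in the background when the metrics flag
			// is enabled; the server is closed once ctx is cancelled.
			metrics.Serve(ctx, c)

			<-ctx.Done()
			return nil
		},
	}
	if err := app.Run(os.Args); err != nil {
		log.Crit("run app failed", "error", err)
	}
}
```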

View File

@@ -1,18 +0,0 @@
package metrics
import (
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"scroll-tech/common/metrics/ginmetrics"
)
// Use register the gin metric
func Use(router *gin.Engine, metricsPrefix string, reg prometheus.Registerer) {
m := ginmetrics.GetMonitor(reg)
m.SetMetricPath("/metrics")
m.SetMetricPrefix(metricsPrefix + "_")
m.SetSlowTime(1)
m.SetDuration([]float64{0.025, .05, .1, .5, 1, 5, 10})
m.UseWithoutExposingEndpoint(router)
}

View File

@@ -1,49 +0,0 @@
package metrics
import (
"errors"
"fmt"
"net/http"
"time"
// enable the pprof
_ "net/http/pprof"
"github.com/gin-contrib/pprof"
"github.com/gin-gonic/gin"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/common/utils"
)
// Server starts the metrics server on the given address, will be closed when the given
// context is canceled.
func Server(c *cli.Context, reg *prometheus.Registry) {
if !c.Bool(utils.MetricsEnabled.Name) {
return
}
r := gin.New()
r.Use(gin.Recovery())
pprof.Register(r)
r.GET("/metrics", func(context *gin.Context) {
promhttp.Handler().ServeHTTP(context.Writer, context.Request)
})
address := fmt.Sprintf(":%s", c.String(utils.MetricsPort.Name))
server := &http.Server{
Addr: address,
Handler: r,
ReadHeaderTimeout: time.Minute,
}
log.Info("Starting metrics server", "address", address)
go func() {
if runServerErr := server.ListenAndServe(); runServerErr != nil && !errors.Is(runServerErr, http.ErrServerClosed) {
log.Crit("run metrics http server failure", "error", runServerErr)
}
}()
}

View File

@@ -169,7 +169,7 @@ func TestBatchHeaderEncode(t *testing.T) {
assert.NotNil(t, batchHeader)
bytes = batchHeader.Encode()
assert.Equal(t, 121, len(bytes))
assert.Equal(t, "010000000000000001000000000000000b000000000000000b34f419ce7e882295bdb5aec6cce56ffa788a5fed4744d7fbd77e4acbf409f1ca4136709aabc8a23aa17fbcc833da2f7857d3c2884feec9aae73429c135f9498500000000000000000000000000000000000000000000000000000000000003ff", common.Bytes2Hex(bytes))
assert.Equal(t, "010000000000000001000000000000000b000000000000000b457a9e90e8e51ba2de2f66c6b589540b88cf594dac7fa7d04b99cdcfecf24e384136709aabc8a23aa17fbcc833da2f7857d3c2884feec9aae73429c135f9498500000000000000000000000000000000000000000000000000000000000003ff", common.Bytes2Hex(bytes))
}
func TestBatchHeaderHash(t *testing.T) {
@@ -230,7 +230,7 @@ func TestBatchHeaderHash(t *testing.T) {
assert.NoError(t, err)
assert.NotNil(t, batchHeader)
hash = batchHeader.Hash()
assert.Equal(t, "1c3007880f0eafe74572ede7d164ff1ee5376e9ac9bff6f7fb837b2630cddc9a", common.Bytes2Hex(hash.Bytes()))
assert.Equal(t, "0ec9547c6645d5f0c1254e121f49e93f54525cfda5bfb2236440fb3470f48902", common.Bytes2Hex(hash.Bytes()))
}
func TestBatchHeaderDecode(t *testing.T) {

View File

@@ -3,31 +3,20 @@ package types
import (
"encoding/binary"
"errors"
"fmt"
"math"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/log"
)
// CalldataNonZeroByteGas is the gas consumption per non zero byte in calldata.
const CalldataNonZeroByteGas = 16
// GetKeccak256Gas calculates keccak256 hash gas.
func GetKeccak256Gas(size uint64) uint64 {
return 30 + 6*((size+31)/32) // 30 + 6 * ceil(size / 32)
}
// WrappedBlock contains the block's Header, Transactions and WithdrawTrieRoot hash.
type WrappedBlock struct {
Header *types.Header `json:"header"`
// Transactions is only used for recover types.Transactions, the from of types.TransactionData field is missing.
Transactions []*types.TransactionData `json:"transactions"`
WithdrawRoot common.Hash `json:"withdraw_trie_root,omitempty"`
RowConsumption *types.RowConsumption `json:"row_consumption"`
txPayloadLengthCache map[string]uint64
Transactions []*types.TransactionData `json:"transactions"`
WithdrawRoot common.Hash `json:"withdraw_trie_root,omitempty"`
RowConsumption *types.RowConsumption `json:"row_consumption"`
}
// NumL1Messages returns the number of L1 messages in this block.
@@ -47,17 +36,6 @@ func (w *WrappedBlock) NumL1Messages(totalL1MessagePoppedBefore uint64) uint64 {
return *lastQueueIndex - totalL1MessagePoppedBefore + 1
}
// NumL2Transactions returns the number of L2 transactions in this block.
func (w *WrappedBlock) NumL2Transactions() uint64 {
var count uint64
for _, txData := range w.Transactions {
if txData.Type != types.L1MessageTxType {
count++
}
}
return count
}
// Encode encodes the WrappedBlock into RollupV2 BlockContext Encoding.
func (w *WrappedBlock) Encode(totalL1MessagePoppedBefore uint64) ([]byte, error) {
bytes := make([]byte, 60)
@@ -65,25 +43,20 @@ func (w *WrappedBlock) Encode(totalL1MessagePoppedBefore uint64) ([]byte, error)
if !w.Header.Number.IsUint64() {
return nil, errors.New("block number is not uint64")
}
if len(w.Transactions) > math.MaxUint16 {
return nil, errors.New("number of transactions exceeds max uint16")
}
// note: numL1Messages includes skipped messages
numL1Messages := w.NumL1Messages(totalL1MessagePoppedBefore)
if numL1Messages > math.MaxUint16 {
return nil, errors.New("number of L1 messages exceeds max uint16")
}
// note: numTransactions includes skipped messages
numL2Transactions := w.NumL2Transactions()
numTransactions := numL1Messages + numL2Transactions
if numTransactions > math.MaxUint16 {
return nil, errors.New("number of transactions exceeds max uint16")
}
binary.BigEndian.PutUint64(bytes[0:], w.Header.Number.Uint64())
binary.BigEndian.PutUint64(bytes[8:], w.Header.Time)
// TODO: [16:47] Currently, baseFee is 0, because we disable EIP-1559.
binary.BigEndian.PutUint64(bytes[48:], w.Header.GasLimit)
binary.BigEndian.PutUint16(bytes[56:], uint16(numTransactions))
binary.BigEndian.PutUint16(bytes[56:], uint16(len(w.Transactions)))
binary.BigEndian.PutUint16(bytes[58:], uint16(numL1Messages))
return bytes, nil
@@ -98,14 +71,17 @@ func (w *WrappedBlock) EstimateL1CommitCalldataSize() uint64 {
if txData.Type == types.L1MessageTxType {
continue
}
size += 64 // 60 bytes BlockContext + 4 bytes payload length
size += w.getTxPayloadLength(txData)
size += uint64(len(txData.Data))
}
return size
}
// EstimateL1CommitGas calculates the total L1 commit gas for this block approximately.
func (w *WrappedBlock) EstimateL1CommitGas() uint64 {
getKeccakGas := func(size uint64) uint64 {
return 30 + 6*((size+31)/32) // 30 + 6 * ceil(size / 32)
}
var total uint64
var numL1Messages uint64
for _, txData := range w.Transactions {
@@ -114,10 +90,23 @@ func (w *WrappedBlock) EstimateL1CommitGas() uint64 {
continue
}
txPayloadLength := w.getTxPayloadLength(txData)
total += CalldataNonZeroByteGas * txPayloadLength // an over-estimate: treat each byte as non-zero
total += CalldataNonZeroByteGas * 64 // 60 bytes BlockContext + 4 bytes payload length
total += GetKeccak256Gas(txPayloadLength) // l2 tx hash
data, _ := hexutil.Decode(txData.Data)
tx := types.NewTx(&types.LegacyTx{
Nonce: txData.Nonce,
To: txData.To,
Value: txData.Value.ToInt(),
Gas: txData.Gas,
GasPrice: txData.GasPrice.ToInt(),
Data: data,
V: txData.V.ToInt(),
R: txData.R.ToInt(),
S: txData.S.ToInt(),
})
rlpTxData, _ := tx.MarshalBinary()
txPayloadLength := uint64(len(rlpTxData))
total += 16 * txPayloadLength // an over-estimate: treat each byte as non-zero
total += 16 * 4 // size of a uint32 field
total += getKeccakGas(txPayloadLength) // l2 tx hash
}
// sload
@@ -140,48 +129,3 @@ func (w *WrappedBlock) L2TxsNum() uint64 {
}
return count
}
func (w *WrappedBlock) getTxPayloadLength(txData *types.TransactionData) uint64 {
if w.txPayloadLengthCache == nil {
w.txPayloadLengthCache = make(map[string]uint64)
}
if length, exists := w.txPayloadLengthCache[txData.TxHash]; exists {
return length
}
rlpTxData, err := convertTxDataToRLPEncoding(txData)
if err != nil {
log.Crit("convertTxDataToRLPEncoding failed, which should not happen", "hash", txData.TxHash, "err", err)
return 0
}
txPayloadLength := uint64(len(rlpTxData))
w.txPayloadLengthCache[txData.TxHash] = txPayloadLength
return txPayloadLength
}
func convertTxDataToRLPEncoding(txData *types.TransactionData) ([]byte, error) {
data, err := hexutil.Decode(txData.Data)
if err != nil {
return nil, fmt.Errorf("failed to decode txData.Data: %s, err: %w", txData.Data, err)
}
tx := types.NewTx(&types.LegacyTx{
Nonce: txData.Nonce,
To: txData.To,
Value: txData.Value.ToInt(),
Gas: txData.Gas,
GasPrice: txData.GasPrice.ToInt(),
Data: data,
V: txData.V.ToInt(),
R: txData.R.ToInt(),
S: txData.S.ToInt(),
})
rlpTxData, err := tx.MarshalBinary()
if err != nil {
return nil, fmt.Errorf("failed to marshal binary of the tx: %+v, err: %w", tx, err)
}
return rlpTxData, nil
}
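
To make the per-transaction arithmetic in EstimateL1CommitGas concrete, here is a self-contained sketch (assuming the scroll-tech/go-ethereum fork keeps the upstream core/types API) that RLP-encodes a dummy legacy transaction and prices it the way both sides of the diff do: 16 gas per payload byte treated as non-zero calldata, plus 30 + 6*ceil(size/32) gas for the keccak of the payload:

```go
package main

import (
	"fmt"
	"math/big"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/core/types"
)

// keccakGas mirrors the helper in the diff: 30 + 6 * ceil(size/32).
func keccakGas(size uint64) uint64 {
	return 30 + 6*((size+31)/32)
}

func main() {
	// Dummy transaction; all values are illustrative.
	to := common.HexToAddress("0x1a258d17bf244c4df02d40343a7626a9d321e105")
	tx := types.NewTx(&types.LegacyTx{
		Nonce:    1,
		To:       &to,
		Value:    big.NewInt(0),
		Gas:      21000,
		GasPrice: big.NewInt(1),
		Data:     nil,
		V:        big.NewInt(0),
		R:        big.NewInt(0),
		S:        big.NewInt(0),
	})

	rlpTxData, err := tx.MarshalBinary()
	if err != nil {
		panic(err)
	}
	payloadLen := uint64(len(rlpTxData))

	// Over-estimate: every payload byte is priced as a non-zero calldata byte (16 gas),
	// plus the keccak cost of computing the L2 tx hash over the payload.
	gas := 16*payloadLen + keccakGas(payloadLen)
	fmt.Printf("payload = %d bytes, per-tx commit gas estimate = %d\n", payloadLen, gas)
}
```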

View File

@@ -8,6 +8,7 @@ import (
"strings"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
)
@@ -64,7 +65,23 @@ func (c *Chunk) Encode(totalL1MessagePoppedBefore uint64) ([]byte, error) {
if txData.Type == types.L1MessageTxType {
continue
}
rlpTxData, err := convertTxDataToRLPEncoding(txData)
data, err := hexutil.Decode(txData.Data)
if err != nil {
return nil, err
}
// right now we only support legacy tx
tx := types.NewTx(&types.LegacyTx{
Nonce: txData.Nonce,
To: txData.To,
Value: txData.Value.ToInt(),
Gas: txData.Gas,
GasPrice: txData.GasPrice.ToInt(),
Data: data,
V: txData.V.ToInt(),
R: txData.R.ToInt(),
S: txData.S.ToInt(),
})
rlpTxData, err := tx.MarshalBinary()
if err != nil {
return nil, err
}
@@ -129,10 +146,14 @@ func (c *Chunk) EstimateL1CommitGas() uint64 {
}
numBlocks := uint64(len(c.Blocks))
totalL1CommitGas += 100 * numBlocks // numBlocks times warm sload
totalL1CommitGas += CalldataNonZeroByteGas // numBlocks field of chunk encoding in calldata
totalL1CommitGas += CalldataNonZeroByteGas * numBlocks * 60 // numBlocks of BlockContext in chunk
totalL1CommitGas += 100 * numBlocks // numBlocks times warm sload
totalL1CommitGas += 16 // numBlocks field of chunk encoding in calldata
totalL1CommitGas += 16 * 60 * numBlocks // BlockContext in chunk
totalL1CommitGas += GetKeccak256Gas(58*numBlocks + 32*totalTxNum) // chunk hash
getKeccakGas := func(size uint64) uint64 {
return 30 + 6*((size+31)/32) // 30 + 6 * ceil(size / 32)
}
totalL1CommitGas += getKeccakGas(58*numBlocks + 32*totalTxNum) // chunk hash
return totalL1CommitGas
}
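
For the chunk-level terms, a tiny worked example with made-up numbers: a chunk of 2 blocks and 13 total transactions hashes 58*2 + 32*13 = 532 bytes, so the chunk-hash keccak costs 30 + 6*ceil(532/32) = 132 gas on top of the calldata and warm-sload terms. The sketch below mirrors only the block-independent terms shown above; per-transaction costs are added separately:

```go
package main

import "fmt"

// chunkOverheadGas mirrors the block-level terms of Chunk.EstimateL1CommitGas in the diff above.
func chunkOverheadGas(numBlocks, totalTxNum uint64) uint64 {
	keccakGas := func(size uint64) uint64 { return 30 + 6*((size+31)/32) }

	gas := uint64(0)
	gas += 100 * numBlocks                         // one warm SLOAD per block
	gas += 16                                      // numBlocks field of the chunk encoding
	gas += 16 * 60 * numBlocks                     // one 60-byte BlockContext per block in calldata
	gas += keccakGas(58*numBlocks + 32*totalTxNum) // chunk hash: 58 bytes per block + 32 bytes per tx
	return gas
}

func main() {
	fmt.Println(chunkOverheadGas(2, 13)) // 2268
}
```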

View File

@@ -38,15 +38,11 @@ func TestChunkEncode(t *testing.T) {
wrappedBlock := &WrappedBlock{}
assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
assert.Equal(t, uint64(0), wrappedBlock.NumL1Messages(0))
assert.Equal(t, uint64(358), wrappedBlock.EstimateL1CommitCalldataSize())
assert.Equal(t, uint64(2), wrappedBlock.L2TxsNum())
chunk = &Chunk{
Blocks: []*WrappedBlock{
wrappedBlock,
},
}
assert.Equal(t, uint64(0), chunk.NumL1Messages(0))
assert.Equal(t, uint64(6966), chunk.EstimateL1CommitGas())
bytes, err = chunk.Encode(0)
hexString := hex.EncodeToString(bytes)
assert.NoError(t, err)
@@ -60,36 +56,29 @@ func TestChunkEncode(t *testing.T) {
wrappedBlock2 := &WrappedBlock{}
assert.NoError(t, json.Unmarshal(templateBlockTrace2, wrappedBlock2))
assert.Equal(t, uint64(11), wrappedBlock2.NumL1Messages(0)) // 0..=9 skipped, 10 included
assert.Equal(t, uint64(96), wrappedBlock2.EstimateL1CommitCalldataSize())
assert.Equal(t, uint64(1), wrappedBlock2.L2TxsNum())
chunk = &Chunk{
Blocks: []*WrappedBlock{
wrappedBlock2,
},
}
assert.Equal(t, uint64(11), chunk.NumL1Messages(0))
assert.Equal(t, uint64(5002), chunk.EstimateL1CommitGas())
bytes, err = chunk.Encode(0)
hexString = hex.EncodeToString(bytes)
assert.NoError(t, err)
assert.Equal(t, 97, len(bytes))
assert.Equal(t, "01000000000000000d00000000646b6e13000000000000000000000000000000000000000000000000000000000000000000000000007a1200000c000b00000020df0b80825dc0941a258d17bf244c4df02d40343a7626a9d321e1058080808080", hexString)
assert.Equal(t, "01000000000000000d00000000646b6e13000000000000000000000000000000000000000000000000000000000000000000000000007a12000002000b00000020df0b80825dc0941a258d17bf244c4df02d40343a7626a9d321e1058080808080", hexString)
// Test case 5: when the chunk contains two blocks each with 1 L1MsgTx
// TODO: revise this test, we cannot reuse the same L1MsgTx twice
chunk = &Chunk{
Blocks: []*WrappedBlock{
wrappedBlock2,
wrappedBlock2,
},
}
assert.Equal(t, uint64(11), chunk.NumL1Messages(0))
assert.Equal(t, uint64(9958), chunk.EstimateL1CommitGas())
bytes, err = chunk.Encode(0)
hexString = hex.EncodeToString(bytes)
assert.NoError(t, err)
assert.Equal(t, 193, len(bytes))
assert.Equal(t, "02000000000000000d00000000646b6e13000000000000000000000000000000000000000000000000000000000000000000000000007a1200000c000b000000000000000d00000000646b6e13000000000000000000000000000000000000000000000000000000000000000000000000007a12000001000000000020df0b80825dc0941a258d17bf244c4df02d40343a7626a9d321e105808080808000000020df0b80825dc0941a258d17bf244c4df02d40343a7626a9d321e1058080808080", hexString)
assert.Equal(t, "02000000000000000d00000000646b6e13000000000000000000000000000000000000000000000000000000000000000000000000007a12000002000b000000000000000d00000000646b6e13000000000000000000000000000000000000000000000000000000000000000000000000007a12000002000000000020df0b80825dc0941a258d17bf244c4df02d40343a7626a9d321e105808080808000000020df0b80825dc0941a258d17bf244c4df02d40343a7626a9d321e1058080808080", hexString)
}
func TestChunkHash(t *testing.T) {
@@ -144,83 +133,5 @@ func TestChunkHash(t *testing.T) {
}
hash, err = chunk.Hash(0)
assert.NoError(t, err)
assert.Equal(t, "0x2eb7dd63bf8fc29a0f8c10d16c2ae6f9da446907c79d50f5c164d30dc8526b60", hash.Hex())
}
func TestErrorPaths(t *testing.T) {
// test 1: Header.Number is not a uint64
templateBlockTrace, err := os.ReadFile("../testdata/blockTrace_02.json")
assert.NoError(t, err)
wrappedBlock := &WrappedBlock{}
assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
wrappedBlock.Header.Number = wrappedBlock.Header.Number.Lsh(wrappedBlock.Header.Number, 64)
bytes, err := wrappedBlock.Encode(0)
assert.Nil(t, bytes)
assert.Error(t, err)
assert.Contains(t, err.Error(), "block number is not uint64")
assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
for i := 0; i < 65537; i++ {
wrappedBlock.Transactions = append(wrappedBlock.Transactions, wrappedBlock.Transactions[0])
}
bytes, err = wrappedBlock.Encode(0)
assert.Nil(t, bytes)
assert.Error(t, err)
assert.Contains(t, err.Error(), "number of transactions exceeds max uint16")
chunk := &Chunk{
Blocks: []*WrappedBlock{
wrappedBlock,
},
}
bytes, err = chunk.Encode(0)
assert.Nil(t, bytes)
assert.Error(t, err)
assert.Contains(t, err.Error(), "number of transactions exceeds max uint16")
wrappedBlock.Transactions = wrappedBlock.Transactions[:1]
wrappedBlock.Transactions[0].Data = "not-a-hex"
bytes, err = chunk.Encode(0)
assert.Nil(t, bytes)
assert.Error(t, err)
assert.Contains(t, err.Error(), "hex string without 0x prefix")
assert.NoError(t, json.Unmarshal(templateBlockTrace, wrappedBlock))
wrappedBlock.Transactions[0].TxHash = "not-a-hex"
_, err = chunk.Hash(0)
assert.Error(t, err)
assert.Contains(t, err.Error(), "invalid byte")
templateBlockTrace2, err := os.ReadFile("../testdata/blockTrace_04.json")
assert.NoError(t, err)
wrappedBlock2 := &WrappedBlock{}
assert.NoError(t, json.Unmarshal(templateBlockTrace2, wrappedBlock2))
for i := 0; i < 65535; i++ {
tx := &wrappedBlock2.Transactions[i]
txCopy := *tx
txCopy.Nonce = uint64(i + 1)
wrappedBlock2.Transactions = append(wrappedBlock2.Transactions, txCopy)
}
bytes, err = wrappedBlock2.Encode(0)
assert.Nil(t, bytes)
assert.Error(t, err)
assert.Contains(t, err.Error(), "number of L1 messages exceeds max uint16")
chunk = &Chunk{
Blocks: []*WrappedBlock{
wrappedBlock2,
},
}
bytes, err = chunk.Encode(0)
assert.Nil(t, bytes)
assert.Error(t, err)
assert.Contains(t, err.Error(), "number of L1 messages exceeds max uint16")
assert.Equal(t, "0x42967825696a129e7a83f082097aca982747480956dcaa448c9296e795c9a91a", hash.Hex())
}

View File

@@ -126,8 +126,8 @@ const (
ProvingTaskUnassigned
// ProvingTaskAssigned : proving_task is assigned to be proved
ProvingTaskAssigned
// ProvingTaskProvedDEPRECATED DEPRECATED: proof has been returned by prover
ProvingTaskProvedDEPRECATED
// ProvingTaskProved : proof has been returned by prover
ProvingTaskProved
// ProvingTaskVerified : proof is valid
ProvingTaskVerified
// ProvingTaskFailed : fail to generate proof
@@ -140,7 +140,7 @@ func (ps ProvingStatus) String() string {
return "unassigned"
case ProvingTaskAssigned:
return "assigned"
case ProvingTaskProvedDEPRECATED:
case ProvingTaskProved:
return "proved"
case ProvingTaskVerified:
return "verified"

View File

@@ -58,8 +58,8 @@ func TestProvingStatus(t *testing.T) {
"assigned",
},
{
"ProvingTaskProvedDEPRECATED",
ProvingTaskProvedDEPRECATED,
"ProvingTaskProved",
ProvingTaskProved,
"proved",
},
{

View File

@@ -13,18 +13,6 @@ import (
"github.com/scroll-tech/go-ethereum/rlp"
)
// ProofFailureType the proof failure type
type ProofFailureType int
const (
// ProofFailureUndefined the undefined type proof failure type
ProofFailureUndefined ProofFailureType = iota
// ProofFailurePanic proof failure for prover panic
ProofFailurePanic
// ProofFailureNoPanic proof failure for no prover panic
ProofFailureNoPanic
)
// RespStatus represents status code from prover to scroll
type RespStatus uint32
@@ -45,7 +33,7 @@ func (r ProofType) String() string {
case ProofTypeBatch:
return "proof type batch"
default:
return fmt.Sprintf("illegal proof type: %d", r)
return "illegal proof type"
}
}
@@ -72,8 +60,6 @@ type AuthMsg struct {
type Identity struct {
// ProverName the prover name
ProverName string `json:"prover_name"`
// ProverVersion the prover version
ProverVersion string `json:"prover_version"`
// Challenge unique challenge generated by manager
Challenge string `json:"challenge"`
}
@@ -267,9 +253,6 @@ type ChunkProof struct {
Proof []byte `json:"proof"`
Instances []byte `json:"instances"`
Vk []byte `json:"vk"`
// cross-reference between coordinator computation and prover computation
ChunkInfo *ChunkInfo `json:"chunk_info,omitempty"`
GitVersion string `json:"git_version,omitempty"`
}
// BatchProof includes the proof info that are required for batch verification and rollup.
@@ -277,8 +260,6 @@ type BatchProof struct {
Proof []byte `json:"proof"`
Instances []byte `json:"instances"`
Vk []byte `json:"vk"`
// cross-reference between coordinator computation and prover computation
GitVersion string `json:"git_version,omitempty"`
}
// SanityCheck checks whether a BatchProof is in a legal format
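
For readers tracking the auth-message changes, a small sketch of hashing an Identity, using only the two fields present on both sides of the diff and the Hash method exercised in the tests below. The import path is assumed; the field values are illustrative:

```go
package main

import (
	"encoding/hex"
	"fmt"

	"scroll-tech/common/types/message" // assumed import path of the package shown above
)

func main() {
	identity := &message.Identity{
		ProverName: "example-prover",
		Challenge:  "some-jwt-challenge",
	}

	// Hash is the digest the prover signs during login. Its value changes whenever
	// fields (such as ProverVersion) are added to or removed from the struct, which
	// is why the expected hashes in the test file below were updated.
	hash, err := identity.Hash()
	if err != nil {
		panic(err)
	}
	fmt.Println(hex.EncodeToString(hash))
}
```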

View File

@@ -15,9 +15,8 @@ func TestAuthMessageSignAndVerify(t *testing.T) {
authMsg := &AuthMsg{
Identity: &Identity{
Challenge: "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2OTEwMzgxNzUsIm9yaWdfaWF0IjoxNjkxMDM0NTc1fQ.HybBMsEJFhyZqtIa2iVcHUP7CEFttf708jmTMAImAWA",
ProverName: "test",
ProverVersion: "v1.0.0",
Challenge: "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2OTEwMzgxNzUsIm9yaWdfaWF0IjoxNjkxMDM0NTc1fQ.HybBMsEJFhyZqtIa2iVcHUP7CEFttf708jmTMAImAWA",
ProverName: "test",
},
}
assert.NoError(t, authMsg.SignWithKey(privkey))
@@ -46,15 +45,14 @@ func TestGenerateToken(t *testing.T) {
func TestIdentityHash(t *testing.T) {
identity := &Identity{
Challenge: "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2OTEwMzM0MTksIm9yaWdfaWF0IjoxNjkxMDI5ODE5fQ.EhkLZsj__rNPVC3ZDYBtvdh0nB8mmM_Hl82hObaIWOs",
ProverName: "test",
ProverVersion: "v1.0.0",
Challenge: "eyJhbGciOiJIUzI1NiIsInR5cCI6IkpXVCJ9.eyJleHAiOjE2OTEwMzM0MTksIm9yaWdfaWF0IjoxNjkxMDI5ODE5fQ.EhkLZsj__rNPVC3ZDYBtvdh0nB8mmM_Hl82hObaIWOs",
ProverName: "test",
}
hash, err := identity.Hash()
assert.NoError(t, err)
expectedHash := "83f5e0ad023e9c1de639ab07b9b4cb972ec9dbbd2524794c533a420a5b137721"
expectedHash := "7373bb57ab7c01307d86fec55e088e00c526d82d9d1303fa4a189ff6ea95fbe2"
assert.Equal(t, expectedHash, hex.EncodeToString(hash))
}
@@ -73,7 +71,6 @@ func TestProofMessageSignVerifyPublicKey(t *testing.T) {
Proof: []byte("testProof"),
Instances: []byte("testInstance"),
Vk: []byte("testVk"),
ChunkInfo: nil,
},
Error: "testError",
},
@@ -102,13 +99,12 @@ func TestProofDetailHash(t *testing.T) {
Proof: []byte("testProof"),
Instances: []byte("testInstance"),
Vk: []byte("testVk"),
ChunkInfo: nil,
},
Error: "testError",
}
hash, err := proofDetail.Hash()
assert.NoError(t, err)
expectedHash := "d3b57cb84b0da8043373eeb3612806fb7248d6d1b6e089846ccf3ccce2d9f31c"
expectedHash := "4165f5ab3399001002a5b8e4062914249a2deb72f6133d647b586f53e236802d"
assert.Equal(t, expectedHash, hex.EncodeToString(hash))
}
@@ -120,7 +116,7 @@ func TestProveTypeString(t *testing.T) {
assert.Equal(t, "proof type batch", proofTypeBatch.String())
illegalProof := ProofType(3)
assert.Equal(t, "illegal proof type: 3", illegalProof.String())
assert.Equal(t, "illegal proof type", illegalProof.String())
}
func TestProofMsgPublicKey(t *testing.T) {
@@ -138,7 +134,6 @@ func TestProofMsgPublicKey(t *testing.T) {
Proof: []byte("testProof"),
Instances: []byte("testInstance"),
Vk: []byte("testVk"),
ChunkInfo: nil,
},
Error: "testError",
},

View File

@@ -20,17 +20,12 @@ var (
MessageRelayerApp MockAppName = "message-relayer-test"
// RollupRelayerApp the name of mock rollup-relayer app.
RollupRelayerApp MockAppName = "rollup-relayer-test"
// DBCliApp the name of mock database app.
DBCliApp MockAppName = "db_cli-test"
// CoordinatorApp the name of mock coordinator app.
CoordinatorApp MockAppName = "coordinator-test"
// ChunkProverApp the name of mock chunk prover app.
ChunkProverApp MockAppName = "chunkProver-test"
// BatchProverApp the name of mock batch prover app.
BatchProverApp MockAppName = "batchProver-test"
// DBCliApp the name of mock database app.
DBCliApp MockAppName = "db_cli-test"
// ProverApp the name of mock prover app.
ProverApp MockAppName = "prover-test"
)
// RegisterSimulation register initializer function for integration-test.

View File

@@ -3,11 +3,9 @@ package version
import (
"fmt"
"runtime/debug"
"strconv"
"strings"
)
var tag = "v4.1.99"
var tag = "v4.1.1"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {
@@ -21,65 +19,11 @@ var commit = func() string {
}
}
}
// Set default value for integration test.
return "000000"
return ""
}()
// ZkVersion is commit-id of common/libzkp/impl/cargo.lock/scroll-prover and halo2, concatenated with a "-"
// The default `000000-000000` is set for integration test, and will be overwritten by coordinator's & prover's actual compilations (see their Makefiles).
var ZkVersion = "000000-000000"
// ZkVersion is commit-id of common/libzkp/impl/cargo.lock/scroll-prover
var ZkVersion string
// Version denote the version of scroll protocol, including the l2geth, relayer, coordinator, prover, contracts and etc.
var Version = fmt.Sprintf("%s-%s-%s", tag, commit, ZkVersion)
// CheckScrollProverVersion check the "scroll-prover" version, if it's different from the local one, return false
func CheckScrollProverVersion(proverVersion string) bool {
// note that the version is in fact in the format of "tag-commit-scroll_prover-halo2",
// so split-by-'-' length should be 4
remote := strings.Split(proverVersion, "-")
if len(remote) != 4 {
return false
}
local := strings.Split(Version, "-")
if len(local) != 4 {
return false
}
// compare the `scroll_prover` version
return remote[2] == local[2]
}
// CheckScrollProverVersionTag check the "scroll-prover" version's tag, if it's too old, return false
func CheckScrollProverVersionTag(proverVersion string) bool {
// note that the version is in fact in the format of "tag-commit-scroll_prover-halo2",
// so split-by-'-' length should be 4
remote := strings.Split(proverVersion, "-")
if len(remote) != 4 {
return false
}
remoteTagNums := strings.Split(strings.TrimPrefix(remote[0], "v"), ".")
if len(remoteTagNums) != 3 {
return false
}
remoteTagMajor, err := strconv.Atoi(remoteTagNums[0])
if err != nil {
return false
}
remoteTagMinor, err := strconv.Atoi(remoteTagNums[1])
if err != nil {
return false
}
remoteTagPatch, err := strconv.Atoi(remoteTagNums[2])
if err != nil {
return false
}
if remoteTagMajor != 4 {
return false
}
if remoteTagMinor != 1 {
return false
}
if remoteTagPatch < 96 {
return false
}
return true
}
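
The version helpers on the side of the diff that keeps them treat Version as four dash-separated parts, tag-commit-scroll_prover-halo2, and compare only the scroll_prover component. A standalone sketch of that comparison with made-up version strings:

```go
package main

import (
	"fmt"
	"strings"
)

// sameScrollProver mirrors the comparison in CheckScrollProverVersion above:
// both strings must have the form "tag-commit-scroll_prover-halo2" and agree
// on the third component, the scroll_prover commit.
func sameScrollProver(local, remote string) bool {
	l := strings.Split(local, "-")
	r := strings.Split(remote, "-")
	if len(l) != 4 || len(r) != 4 {
		return false
	}
	return l[2] == r[2]
}

func main() {
	local := "v4.1.98-aaaaaaa-1111111-2222222" // illustrative only
	fmt.Println(sameScrollProver(local, "v4.1.99-bbbbbbb-1111111-3333333")) // true: same scroll_prover commit
	fmt.Println(sameScrollProver(local, "v4.1.99-bbbbbbb-9999999-2222222")) // false: different scroll_prover commit
}
```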

View File

@@ -260,23 +260,6 @@ function owner() external view returns (address)
*Returns the address of the current owner.*
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of token rate limiter contract.
#### Returns
| Name | Type | Description |
@@ -371,22 +354,6 @@ function transferOwnership(address newOwner) external nonpayable
|---|---|---|
| newOwner | address | undefined |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only be called by the contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
### updateTokenMapping
```solidity
@@ -563,27 +530,10 @@ Emitted when some ERC1155 token is refunded.
| tokenId | uint256 | undefined |
| amount | uint256 | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |
### UpdateTokenMapping
```solidity
event UpdateTokenMapping(address indexed l1Token, address indexed oldL2Token, address indexed newL2Token)
event UpdateTokenMapping(address _l1Token, address _l2Token)
```
Emitted when token mapping for ERC1155 token is updated.
@@ -594,9 +544,8 @@ Emitted when token mapping for ERC1155 token is updated.
| Name | Type | Description |
|---|---|---|
| l1Token `indexed` | address | The address of ERC1155 token in layer 1. |
| oldL2Token `indexed` | address | The address of the old corresponding ERC1155 token in layer 2. |
| newL2Token `indexed` | address | The address of the new corresponding ERC1155 token in layer 2. |
| _l1Token | address | The address of ERC1155 token on layer 1. |
| _l2Token | address | The address of corresponding ERC1155 token on layer 2. |

View File

@@ -227,23 +227,6 @@ function owner() external view returns (address)
*Returns the address of the current owner.*
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of token rate limiter contract.
#### Returns
| Name | Type | Description |
@@ -316,22 +299,6 @@ function transferOwnership(address newOwner) external nonpayable
|---|---|---|
| newOwner | address | undefined |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only be called by the contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
### updateTokenMapping
```solidity
@@ -502,27 +469,10 @@ Emitted when some ERC721 token is refunded.
| recipient `indexed` | address | undefined |
| tokenId | uint256 | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |
### UpdateTokenMapping
```solidity
event UpdateTokenMapping(address indexed l1Token, address indexed oldL2Token, address indexed newL2Token)
event UpdateTokenMapping(address _l1Token, address _l2Token)
```
Emitted when token mapping for ERC721 token is updated.
@@ -533,9 +483,8 @@ Emitted when token mapping for ERC721 token is updated.
| Name | Type | Description |
|---|---|---|
| l1Token `indexed` | address | The address of ERC721 token in layer 1. |
| oldL2Token `indexed` | address | The address of the old corresponding ERC721 token in layer 2. |
| newL2Token `indexed` | address | The address of the new corresponding ERC721 token in layer 2. |
| _l1Token | address | The address of ERC721 token on layer 1. |
| _l2Token | address | The address of corresponding ERC721 token on layer 2. |

View File

@@ -350,7 +350,7 @@ Request ERC20 token transfer from users to gateways.
### setDefaultERC20Gateway
```solidity
function setDefaultERC20Gateway(address _newDefaultERC20Gateway) external nonpayable
function setDefaultERC20Gateway(address _defaultERC20Gateway) external nonpayable
```
Update the address of default ERC20 gateway contract.
@@ -361,7 +361,7 @@ Update the address of default ERC20 gateway contract.
| Name | Type | Description |
|---|---|---|
| _newDefaultERC20Gateway | address | undefined |
| _defaultERC20Gateway | address | The address to update. |
### setERC20Gateway
@@ -383,7 +383,7 @@ Update the mapping from token address to gateway address.
### setETHGateway
```solidity
function setETHGateway(address _newEthGateway) external nonpayable
function setETHGateway(address _ethGateway) external nonpayable
```
Update the address of ETH gateway contract.
@@ -394,7 +394,7 @@ Update the address of ETH gateway contract.
| Name | Type | Description |
|---|---|---|
| _newEthGateway | address | undefined |
| _ethGateway | address | The address to update. |
### transferOwnership
@@ -567,7 +567,7 @@ Emitted when some ETH is refunded.
### SetDefaultERC20Gateway
```solidity
event SetDefaultERC20Gateway(address indexed oldDefaultERC20Gateway, address indexed newDefaultERC20Gateway)
event SetDefaultERC20Gateway(address indexed defaultERC20Gateway)
```
Emitted when the address of default ERC20 Gateway is updated.
@@ -578,13 +578,12 @@ Emitted when the address of default ERC20 Gateway is updated.
| Name | Type | Description |
|---|---|---|
| oldDefaultERC20Gateway `indexed` | address | undefined |
| newDefaultERC20Gateway `indexed` | address | undefined |
| defaultERC20Gateway `indexed` | address | undefined |
### SetERC20Gateway
```solidity
event SetERC20Gateway(address indexed token, address indexed oldGateway, address indexed newGateway)
event SetERC20Gateway(address indexed token, address indexed gateway)
```
Emitted when the `gateway` for `token` is updated.
@@ -596,13 +595,12 @@ Emitted when the `gateway` for `token` is updated.
| Name | Type | Description |
|---|---|---|
| token `indexed` | address | undefined |
| oldGateway `indexed` | address | undefined |
| newGateway `indexed` | address | undefined |
| gateway `indexed` | address | undefined |
### SetETHGateway
```solidity
event SetETHGateway(address indexed oldETHGateway, address indexed newEthGateway)
event SetETHGateway(address indexed ethGateway)
```
Emitted when the address of ETH Gateway is updated.
@@ -613,8 +611,7 @@ Emitted when the address of ETH Gateway is updated.
| Name | Type | Description |
|---|---|---|
| oldETHGateway `indexed` | address | undefined |
| newEthGateway `indexed` | address | undefined |
| ethGateway `indexed` | address | undefined |

View File

@@ -239,23 +239,6 @@ Mapping from queue index to previous replay queue index.
|---|---|---|
| _0 | uint256 | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of ETH rate limiter contract.
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### relayMessageWithProof
```solidity
@@ -440,7 +423,7 @@ Update fee vault contract.
### updateMaxReplayTimes
```solidity
function updateMaxReplayTimes(uint256 _newMaxReplayTimes) external nonpayable
function updateMaxReplayTimes(uint256 _maxReplayTimes) external nonpayable
```
Update max replay times.
@@ -451,23 +434,7 @@ Update max replay times.
| Name | Type | Description |
|---|---|---|
| _newMaxReplayTimes | uint256 | The new max replay times. |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only be called by the contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
| _maxReplayTimes | uint256 | The new max replay times. |
### xDomainMessageSender
@@ -628,7 +595,7 @@ Emitted when owner updates fee vault contract.
### UpdateMaxReplayTimes
```solidity
event UpdateMaxReplayTimes(uint256 oldMaxReplayTimes, uint256 newMaxReplayTimes)
event UpdateMaxReplayTimes(uint256 maxReplayTimes)
```
Emitted when the maximum number of times each message can be replayed is updated.
@@ -639,25 +606,7 @@ Emitted when the maximum number of times each message can be replayed is updated
| Name | Type | Description |
|---|---|---|
| oldMaxReplayTimes | uint256 | undefined |
| newMaxReplayTimes | uint256 | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |
| maxReplayTimes | uint256 | undefined |

View File

@@ -214,51 +214,6 @@ function onDropMessage(bytes _message) external payable
|---|---|---|
| _message | bytes | undefined |
### owner
```solidity
function owner() external view returns (address)
```
*Returns the address of the current owner.*
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of token rate limiter contract.
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### renounceOwnership
```solidity
function renounceOwnership() external nonpayable
```
*Leaves the contract without owner. It will not be possible to call `onlyOwner` functions. Can only be called by the current owner. NOTE: Renouncing ownership will leave the contract without an owner, thereby disabling any functionality that is only available to the owner.*
### router
```solidity
@@ -276,38 +231,6 @@ The address of L1GatewayRouter/L2GatewayRouter contract.
|---|---|---|
| _0 | address | undefined |
### transferOwnership
```solidity
function transferOwnership(address newOwner) external nonpayable
```
*Transfers ownership of the contract to a new account (`newOwner`). Can only be called by the current owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| newOwner | address | undefined |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only be called by the contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
## Events
@@ -370,23 +293,6 @@ event Initialized(uint8 version)
|---|---|---|
| version | uint8 | undefined |
### OwnershipTransferred
```solidity
event OwnershipTransferred(address indexed previousOwner, address indexed newOwner)
```
#### Parameters
| Name | Type | Description |
|---|---|---|
| previousOwner `indexed` | address | undefined |
| newOwner `indexed` | address | undefined |
### RefundERC20
```solidity
@@ -405,22 +311,5 @@ Emitted when some ERC20 token is refunded.
| recipient `indexed` | address | undefined |
| amount | uint256 | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |

View File

@@ -212,51 +212,6 @@ function onDropMessage(bytes _message) external payable
|---|---|---|
| _message | bytes | undefined |
### owner
```solidity
function owner() external view returns (address)
```
*Returns the address of the current owner.*
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of token rate limiter contract.
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### renounceOwnership
```solidity
function renounceOwnership() external nonpayable
```
*Leaves the contract without owner. It will not be possible to call `onlyOwner` functions. Can only be called by the current owner. NOTE: Renouncing ownership will leave the contract without an owner, thereby disabling any functionality that is only available to the owner.*
### router
```solidity
@@ -274,38 +229,6 @@ The address of L1GatewayRouter/L2GatewayRouter contract.
|---|---|---|
| _0 | address | undefined |
### transferOwnership
```solidity
function transferOwnership(address newOwner) external nonpayable
```
*Transfers ownership of the contract to a new account (`newOwner`). Can only be called by the current owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| newOwner | address | undefined |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only be called by the contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
## Events
@@ -368,23 +291,6 @@ event Initialized(uint8 version)
|---|---|---|
| version | uint8 | undefined |
### OwnershipTransferred
```solidity
event OwnershipTransferred(address indexed previousOwner, address indexed newOwner)
```
#### Parameters
| Name | Type | Description |
|---|---|---|
| previousOwner `indexed` | address | undefined |
| newOwner `indexed` | address | undefined |
### RefundERC20
```solidity
@@ -403,22 +309,5 @@ Emitted when some ERC20 token is refunded.
| recipient `indexed` | address | undefined |
| amount | uint256 | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |

View File

@@ -205,23 +205,6 @@ function owner() external view returns (address)
*Returns the address of the current owner.*
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of token rate limiter contract.
#### Returns
| Name | Type | Description |
@@ -316,22 +299,6 @@ function transferOwnership(address newOwner) external nonpayable
|---|---|---|
| newOwner | address | undefined |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only be called by the contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
### updateTokenMapping
```solidity
@@ -488,27 +455,10 @@ event OwnershipTransferred(address indexed previousOwner, address indexed newOwn
| previousOwner `indexed` | address | undefined |
| newOwner `indexed` | address | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |
### UpdateTokenMapping
```solidity
event UpdateTokenMapping(address indexed l2Token, address indexed oldL1Token, address indexed newL1Token)
event UpdateTokenMapping(address _l2Token, address _l1Token)
```
Emitted when token mapping for ERC1155 token is updated.
@@ -519,9 +469,8 @@ Emitted when token mapping for ERC1155 token is updated.
| Name | Type | Description |
|---|---|---|
| l2Token `indexed` | address | The address of corresponding ERC1155 token in layer 2. |
| oldL1Token `indexed` | address | The address of the old corresponding ERC1155 token in layer 1. |
| newL1Token `indexed` | address | The address of the new corresponding ERC1155 token in layer 1. |
| _l2Token | address | The address of corresponding ERC1155 token on layer 2. |
| _l1Token | address | The address of ERC1155 token on layer 1. |
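The `UpdateTokenMapping` event loses both its indexed topics and the old-token argument: it now reports only the current L2/L1 pair (the ERC721 gateway further below receives the identical change). A hedged sketch of the new emit site follows; the storage layout and function body are assumptions, not the gateway's code:
```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;

/// Sketch of the new event shape. Before this change the emit carried three indexed
/// arguments (l2Token, oldL1Token, newL1Token); now it carries a plain pair.
contract TokenMappingSketch {
    /// @notice Emitted when the token mapping for an ERC1155 token is updated.
    event UpdateTokenMapping(address _l2Token, address _l1Token);

    /// @dev L2 token => L1 token; illustrative storage only.
    mapping(address => address) public tokenMapping;

    function updateTokenMapping(address _l2Token, address _l1Token) external {
        // owner-only in the real gateway; access control omitted in this sketch
        tokenMapping[_l2Token] = _l1Token;
        emit UpdateTokenMapping(_l2Token, _l1Token);
    }
}
```
Note that since neither argument is indexed any more, off-chain consumers can no longer filter this event by token address through log topics.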
### WithdrawERC1155

View File

@@ -174,23 +174,6 @@ function owner() external view returns (address)
*Returns the address of the current owner.*
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of token rate limiter contract.
#### Returns
| Name | Type | Description |
@@ -263,22 +246,6 @@ function transferOwnership(address newOwner) external nonpayable
|---|---|---|
| newOwner | address | undefined |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only be called by the contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
### updateTokenMapping
```solidity
@@ -430,27 +397,10 @@ event OwnershipTransferred(address indexed previousOwner, address indexed newOwn
| previousOwner `indexed` | address | undefined |
| newOwner `indexed` | address | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |
### UpdateTokenMapping
```solidity
event UpdateTokenMapping(address indexed l2Token, address indexed oldL1Token, address indexed newL1Token)
event UpdateTokenMapping(address _l2Token, address _l1Token)
```
Emitted when token mapping for ERC721 token is updated.
@@ -461,9 +411,8 @@ Emitted when token mapping for ERC721 token is updated.
| Name | Type | Description |
|---|---|---|
| l2Token `indexed` | address | The address of corresponding ERC721 token in layer 2. |
| oldL1Token `indexed` | address | The address of the old corresponding ERC721 token in layer 1. |
| newL1Token `indexed` | address | The address of the new corresponding ERC721 token in layer 1. |
| _l2Token | address | The address of corresponding ERC721 token on layer 2. |
| _l1Token | address | The address of ERC721 token on layer 1. |
### WithdrawERC721

View File

@@ -220,7 +220,7 @@ function renounceOwnership() external nonpayable
### setDefaultERC20Gateway
```solidity
function setDefaultERC20Gateway(address _newDefaultERC20Gateway) external nonpayable
function setDefaultERC20Gateway(address _defaultERC20Gateway) external nonpayable
```
Update the address of default ERC20 gateway contract.
@@ -231,7 +231,7 @@ Update the address of default ERC20 gateway contract.
| Name | Type | Description |
|---|---|---|
| _newDefaultERC20Gateway | address | The address to update. |
| _defaultERC20Gateway | address | The address to update. |
### setERC20Gateway
@@ -253,7 +253,7 @@ Update the mapping from token address to gateway address.
### setETHGateway
```solidity
function setETHGateway(address _newEthGateway) external nonpayable
function setETHGateway(address _ethGateway) external nonpayable
```
Update the address of ETH gateway contract.
@@ -264,7 +264,7 @@ Update the address of ETH gateway contract.
| Name | Type | Description |
|---|---|---|
| _newEthGateway | address | The address to update. |
| _ethGateway | address | The address to update. |
### transferOwnership
@@ -473,7 +473,7 @@ event OwnershipTransferred(address indexed previousOwner, address indexed newOwn
### SetDefaultERC20Gateway
```solidity
event SetDefaultERC20Gateway(address indexed oldDefaultERC20Gateway, address indexed newDefaultERC20Gateway)
event SetDefaultERC20Gateway(address indexed defaultERC20Gateway)
```
Emitted when the address of default ERC20 Gateway is updated.
@@ -484,13 +484,12 @@ Emitted when the address of default ERC20 Gateway is updated.
| Name | Type | Description |
|---|---|---|
| oldDefaultERC20Gateway `indexed` | address | undefined |
| newDefaultERC20Gateway `indexed` | address | undefined |
| defaultERC20Gateway `indexed` | address | undefined |
### SetERC20Gateway
```solidity
event SetERC20Gateway(address indexed token, address indexed oldGateway, address indexed newGateway)
event SetERC20Gateway(address indexed token, address indexed gateway)
```
Emitted when the `gateway` for `token` is updated.
@@ -502,13 +501,12 @@ Emitted when the `gateway` for `token` is updated.
| Name | Type | Description |
|---|---|---|
| token `indexed` | address | undefined |
| oldGateway `indexed` | address | undefined |
| newGateway `indexed` | address | undefined |
| gateway `indexed` | address | undefined |
### SetETHGateway
```solidity
event SetETHGateway(address indexed oldETHGateway, address indexed newEthGateway)
event SetETHGateway(address indexed ethGateway)
```
Emitted when the address of ETH Gateway is updated.
@@ -519,8 +517,7 @@ Emitted when the address of ETH Gateway is updated.
| Name | Type | Description |
|---|---|---|
| oldETHGateway `indexed` | address | undefined |
| newEthGateway `indexed` | address | undefined |
| ethGateway `indexed` | address | undefined |
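All three router events (`SetDefaultERC20Gateway`, `SetERC20Gateway`, `SetETHGateway`) now carry only the address being set, still indexed, and the setter parameters are renamed to match. A minimal sketch of the resulting setter/event pairing for the ETH gateway — access control and the rest of the router are omitted, so this is not the router's actual implementation:
```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;

/// Sketch of the simplified setter/event pairing introduced by this diff.
contract GatewayRouterSketch {
    /// @notice Emitted when the address of the ETH Gateway is updated.
    event SetETHGateway(address indexed ethGateway);

    address public ethGateway;

    /// @notice Update the address of the ETH gateway contract.
    function setETHGateway(address _ethGateway) external {
        // owner-only in the real router; the restriction is omitted here
        ethGateway = _ethGateway;
        emit SetETHGateway(_ethGateway);
    }
}
```
A consumer that previously recovered the replaced gateway from the event's first argument now has to read the public getter before the update; the same applies to `SetDefaultERC20Gateway` and `SetERC20Gateway`.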
### WithdrawERC20

View File

@@ -47,7 +47,7 @@ The address of fee vault, collecting cross domain messaging fee.
### initialize
```solidity
function initialize(address _counterpart) external nonpayable
function initialize(address _counterpart, address _feeVault) external nonpayable
```
@@ -59,6 +59,7 @@ function initialize(address _counterpart) external nonpayable
| Name | Type | Description |
|---|---|---|
| _counterpart | address | undefined |
| _feeVault | address | undefined |
### isL1MessageExecuted
@@ -194,23 +195,6 @@ function paused() external view returns (bool)
|---|---|---|
| _0 | bool | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of ETH rate limiter contract.
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### relayMessage
```solidity
@@ -332,7 +316,7 @@ Update fee vault contract.
### updateMaxFailedExecutionTimes
```solidity
function updateMaxFailedExecutionTimes(uint256 _newMaxFailedExecutionTimes) external nonpayable
function updateMaxFailedExecutionTimes(uint256 _maxFailedExecutionTimes) external nonpayable
```
Update max failed execution times.
@@ -343,23 +327,7 @@ Update max failed execution times.
| Name | Type | Description |
|---|---|---|
| _newMaxFailedExecutionTimes | uint256 | The new max failed execution times. |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only be called by the contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
| _maxFailedExecutionTimes | uint256 | The new max failed execution times. |
### xDomainMessageSender
@@ -520,7 +488,7 @@ Emitted when owner updates fee vault contract.
### UpdateMaxFailedExecutionTimes
```solidity
event UpdateMaxFailedExecutionTimes(uint256 oldMaxFailedExecutionTimes, uint256 newMaxFailedExecutionTimes)
event UpdateMaxFailedExecutionTimes(uint256 maxFailedExecutionTimes)
```
Emitted when the maximum number of times each message can fail in L2 is updated.
@@ -531,25 +499,7 @@ Emitted when the maximum number of times each message can fail in L2 is updated.
| Name | Type | Description |
|---|---|---|
| oldMaxFailedExecutionTimes | uint256 | undefined |
| newMaxFailedExecutionTimes | uint256 | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |
| maxFailedExecutionTimes | uint256 | The new maximum number of times each message can fail in L2. |
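As with the router events above, `UpdateMaxFailedExecutionTimes` now reports only the new limit, so a caller that still needs the previous value must read it from state before updating. A hedged caller-side sketch — the getter in the interface below is an assumption, since only the setter and the event appear in this hunk, and the interface name is illustrative:
```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;

/// Hypothetical caller-side view of the change; not part of the diff itself.
interface IMessengerWithFailLimit {
    function maxFailedExecutionTimes() external view returns (uint256);
    function updateMaxFailedExecutionTimes(uint256 _maxFailedExecutionTimes) external;
}

contract FailLimitUpdater {
    /// Reads the previous limit before updating, since the event no longer reports it.
    function bump(IMessengerWithFailLimit messenger, uint256 _newLimit) external returns (uint256 previous) {
        previous = messenger.maxFailedExecutionTimes();
        messenger.updateMaxFailedExecutionTimes(_newLimit);
    }
}
```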

View File

@@ -128,51 +128,6 @@ The address of corresponding L1ScrollMessenger/L2ScrollMessenger contract.
|---|---|---|
| _0 | address | undefined |
### owner
```solidity
function owner() external view returns (address)
```
*Returns the address of the current owner.*
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of token rate limiter contract.
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### renounceOwnership
```solidity
function renounceOwnership() external nonpayable
```
*Leaves the contract without owner. It will not be possible to call `onlyOwner` functions. Can only be called by the current owner. NOTE: Renouncing ownership will leave the contract without an owner, thereby disabling any functionality that is only available to the owner.*
### router
```solidity
@@ -207,38 +162,6 @@ The address of ScrollStandardERC20Factory.
|---|---|---|
| _0 | address | undefined |
### transferOwnership
```solidity
function transferOwnership(address newOwner) external nonpayable
```
*Transfers ownership of the contract to a new account (`newOwner`). Can only be called by the current owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| newOwner | address | undefined |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only be called by the contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
### withdrawERC20
```solidity
@@ -337,40 +260,6 @@ event Initialized(uint8 version)
|---|---|---|
| version | uint8 | undefined |
### OwnershipTransferred
```solidity
event OwnershipTransferred(address indexed previousOwner, address indexed newOwner)
```
#### Parameters
| Name | Type | Description |
|---|---|---|
| previousOwner `indexed` | address | undefined |
| newOwner `indexed` | address | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |
### WithdrawERC20
```solidity

View File

@@ -161,51 +161,6 @@ The address of corresponding L1ScrollMessenger/L2ScrollMessenger contract.
|---|---|---|
| _0 | address | undefined |
### owner
```solidity
function owner() external view returns (address)
```
*Returns the address of the current owner.*
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### rateLimiter
```solidity
function rateLimiter() external view returns (address)
```
The address of token rate limiter contract.
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | address | undefined |
### renounceOwnership
```solidity
function renounceOwnership() external nonpayable
```
*Leaves the contract without owner. It will not be possible to call `onlyOwner` functions. Can only be called by the current owner. NOTE: Renouncing ownership will leave the contract without an owner, thereby disabling any functionality that is only available to the owner.*
### router
```solidity
@@ -223,38 +178,6 @@ The address of L1GatewayRouter/L2GatewayRouter contract.
|---|---|---|
| _0 | address | undefined |
### transferOwnership
```solidity
function transferOwnership(address newOwner) external nonpayable
```
*Transfers ownership of the contract to a new account (`newOwner`). Can only be called by the current owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| newOwner | address | undefined |
### updateRateLimiter
```solidity
function updateRateLimiter(address _newRateLimiter) external nonpayable
```
Update rate limiter contract.
*This function can only be called by the contract owner.*
#### Parameters
| Name | Type | Description |
|---|---|---|
| _newRateLimiter | address | The address of new rate limiter contract. |
### withdrawERC20
```solidity
@@ -353,40 +276,6 @@ event Initialized(uint8 version)
|---|---|---|
| version | uint8 | undefined |
### OwnershipTransferred
```solidity
event OwnershipTransferred(address indexed previousOwner, address indexed newOwner)
```
#### Parameters
| Name | Type | Description |
|---|---|---|
| previousOwner `indexed` | address | undefined |
| newOwner `indexed` | address | undefined |
### UpdateRateLimiter
```solidity
event UpdateRateLimiter(address indexed _oldRateLimiter, address indexed _newRateLimiter)
```
Emitted when owner updates rate limiter contract.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _oldRateLimiter `indexed` | address | undefined |
| _newRateLimiter `indexed` | address | undefined |
### WithdrawERC20
```solidity

View File

@@ -1,6 +1,7 @@
/* eslint-disable node/no-unpublished-import */
/* eslint-disable node/no-missing-import */
import { constants } from "ethers";
import { concat } from "ethers/lib/utils";
import { ethers } from "hardhat";
import { ScrollChain, L1MessageQueue } from "../typechain";
@@ -27,7 +28,7 @@ describe("ScrollChain", async () => {
await chain.deployed();
await chain.initialize(queue.address, constants.AddressZero, 44);
await chain.addSequencer(deployer.address);
await chain.updateSequencer(deployer.address, true);
await queue.initialize(
constants.AddressZero,
chain.address,

View File

@@ -22,9 +22,7 @@ contract InitializeL1BridgeContracts is Script {
uint256 CHAIN_ID_L2 = vm.envUint("CHAIN_ID_L2");
uint256 MAX_L2_TX_IN_CHUNK = vm.envUint("MAX_L2_TX_IN_CHUNK");
uint256 MAX_L1_MESSAGE_GAS_LIMIT = vm.envUint("MAX_L1_MESSAGE_GAS_LIMIT");
address L1_COMMIT_SENDER_ADDRESS = vm.envAddress("L1_COMMIT_SENDER_ADDRESS");
address L1_FINALIZE_SENDER_ADDRESS = vm.envAddress("L1_FINALIZE_SENDER_ADDRESS");
address L1_ROLLUP_OPERATOR_ADDR = vm.envAddress("L1_ROLLUP_OPERATOR_ADDR");
address L1_FEE_VAULT_ADDR = vm.envAddress("L1_FEE_VAULT_ADDR");
address L1_WETH_ADDR = vm.envAddress("L1_WETH_ADDR");
@@ -68,8 +66,8 @@ contract InitializeL1BridgeContracts is Script {
L1_MULTIPLE_VERSION_ROLLUP_VERIFIER_ADDR,
MAX_L2_TX_IN_CHUNK
);
ScrollChain(L1_SCROLL_CHAIN_PROXY_ADDR).addSequencer(L1_COMMIT_SENDER_ADDRESS);
ScrollChain(L1_SCROLL_CHAIN_PROXY_ADDR).addProver(L1_FINALIZE_SENDER_ADDRESS);
ScrollChain(L1_SCROLL_CHAIN_PROXY_ADDR).updateSequencer(L1_ROLLUP_OPERATOR_ADDR, true);
ScrollChain(L1_SCROLL_CHAIN_PROXY_ADDR).updateProver(L1_ROLLUP_OPERATOR_ADDR, true);
// initialize L2GasPriceOracle
L2GasPriceOracle(L2_GAS_PRICE_ORACLE_PROXY_ADDR).initialize(
@@ -86,7 +84,7 @@ contract InitializeL1BridgeContracts is Script {
L1_SCROLL_CHAIN_PROXY_ADDR,
L1_ENFORCED_TX_GATEWAY_PROXY_ADDR,
L2_GAS_PRICE_ORACLE_PROXY_ADDR,
MAX_L1_MESSAGE_GAS_LIMIT
10000000
);
// initialize L1ScrollMessenger
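Both the Hardhat test earlier in this diff and this script replace the one-way `addSequencer`/`addProver` calls with boolean toggles, and the separate commit/finalize sender roles collapse into a single `L1_ROLLUP_OPERATOR_ADDR`. Presumably `ScrollChain` now keeps whitelist mappings the owner can switch on and off; a sketch of that shape, where the mapping names are assumptions and ScrollChain's real implementation is not shown in this diff:
```solidity
// SPDX-License-Identifier: MIT
pragma solidity ^0.8.0;

/// Illustrative sketch of the role-toggle API the test and script now call.
contract ScrollChainRolesSketch {
    mapping(address => bool) public isSequencer; // assumed storage name
    mapping(address => bool) public isProver;    // assumed storage name

    function updateSequencer(address _account, bool _status) external {
        // owner-only in the real contract; restriction omitted here
        isSequencer[_account] = _status;
    }

    function updateProver(address _account, bool _status) external {
        isProver[_account] = _status;
    }
}
```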

View File

@@ -65,7 +65,10 @@ contract InitializeL2BridgeContracts is Script {
L1GasPriceOracle(L1_GAS_PRICE_ORACLE_ADDR).updateWhitelist(L2_WHITELIST_ADDR);
// initialize L2ScrollMessenger
L2ScrollMessenger(payable(L2_SCROLL_MESSENGER_PROXY_ADDR)).initialize(L1_SCROLL_MESSENGER_PROXY_ADDR);
L2ScrollMessenger(payable(L2_SCROLL_MESSENGER_PROXY_ADDR)).initialize(
L1_SCROLL_MESSENGER_PROXY_ADDR,
L2_TX_FEE_VAULT_ADDR
);
// initialize L2GatewayRouter
L2GatewayRouter(L2_GATEWAY_ROUTER_PROXY_ADDR).initialize(

View File

@@ -10,9 +10,8 @@ interface IL1ScrollMessenger is IScrollMessenger {
**********/
/// @notice Emitted when the maximum number of times each message can be replayed is updated.
/// @param oldMaxReplayTimes The old maximum number of times each message can be replayed.
/// @param newMaxReplayTimes The new maximum number of times each message can be replayed.
event UpdateMaxReplayTimes(uint256 oldMaxReplayTimes, uint256 newMaxReplayTimes);
/// @param maxReplayTimes The new maximum number of times each message can be replayed.
event UpdateMaxReplayTimes(uint256 maxReplayTimes);
/***********
* Structs *

Some files were not shown because too many files have changed in this diff.