Compare commits


81 Commits

Author SHA1 Message Date
Péter Garamvölgyi
a319dc1cff bugfix(bridge): only relay messages for finalized batches (#251)
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
2023-01-19 20:54:09 +08:00
colin
52bf3a55fc fix(coordinator): fix CollectProofs for multi-roller & add tests (#252)
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
2023-01-19 14:52:20 +08:00
Péter Garamvölgyi
598e10e4fc feat(bridge): update skipped batches in a single db operation (#242) 2023-01-18 15:21:41 +01:00
maskpp
eed3f42731 fix(coordinator): Fix bug in coordinator.GetNumberOfIdleRollers function. (#247)
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
2023-01-18 20:51:19 +08:00
colin
5a4bea8ccd fix(coordinator): set session failed when all rollers have submitted invalid proof (#241)
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-01-18 20:01:43 +08:00
ChuhanJin
5b37b63d89 build(jenkinsfile): replace status_check with comment in coverage test (#249)
Co-authored-by: vincent <419436363@qq.com>
2023-01-18 19:44:47 +08:00
HAOYUatHZ
5e5c4f7701 build(roller&coordinator): fix Makefile (#245)
Co-authored-by: Lawliet-Chan <1576710154@qq.com>
2023-01-18 16:51:55 +08:00
HAOYUatHZ
09dc638652 perf(db): add limit in block_batch queries (#240) 2023-01-17 20:46:51 +08:00
HAOYUatHZ
b598a01e7f build(jenkins): remove changeset condition (#239) 2023-01-17 07:07:58 +08:00
Scroll Dev
0fcdb6f824 doc: bump version number (#238) 2023-01-17 06:57:43 +08:00
Xi Lin
5a95dcf5ba bugfix(bridge&database): fix compute message hash and add unit tests (#233)
Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
2023-01-17 06:56:21 +08:00
Xi Lin
d0c63e75df bugfix(database): make sure return order of statuses in GetRollupStatusByIDList (#235)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2023-01-16 20:57:27 +08:00
Scroll Dev
676b8a2230 Update pull_request_template.md 2023-01-15 07:46:57 +08:00
Xi Lin
a1cb3d3b87 fix(contracts&libzkp): fix mpt circuits panic (#232)
Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
Co-authored-by: Lawliet-Chan <1576710154@qq.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-01-14 14:58:37 +08:00
colin
47b4c54e05 fix(relayer): use sync map for concurrent write (#231)
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
2023-01-14 12:07:59 +08:00
maskpp
fe822a65b9 refactor(bridge): refactor bridge parallelization (#213)
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
2023-01-14 09:40:07 +08:00
HAOYUatHZ
9bd4931f93 chore: fix typos in pull_request template (#230) 2023-01-13 11:50:39 +08:00
HAOYUatHZ
411cb19b62 chore: add pull_request template (#229) 2023-01-13 11:46:19 +08:00
HAOYUatHZ
8f55299941 Revert "refactor(coordinator): remove debug api namespace from manager" (#224)
Co-authored-by: Nazarii Denha <dengaaa2002@gmail.com>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2023-01-13 08:41:36 +08:00
HAOYUatHZ
0bdcce79ba chore: bump version number (#226) 2023-01-13 08:33:53 +08:00
Péter Garamvölgyi
fcd29c305d fix(coordinator): use uint32 for timestamp to enable RLP encoding (#225) 2023-01-12 18:24:59 +01:00
Lawliet-Chan
54a6ab472a feat(message): replace json.Marshal with rlp.Encode for Signing (#219) 2023-01-12 22:21:59 +08:00
HAOYUatHZ
b2a5baa2ad feat(bridge): upgrade TraceGasCost estimation (#222) 2023-01-12 16:45:11 +08:00
Lawliet-Chan
dc6b71ca23 chore: improve golang lint rules (#172)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-01-12 12:55:00 +08:00
Nazarii Denha
e1247a7eb2 refactor(coordinator): remove debug api namespace from manager (#221)
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
2023-01-12 10:16:48 +08:00
colin
65699b89bb feat(coordinator): support rollers-per-session (#215)
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
Co-authored-by: maskpp <maskpp266@gmail.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-01-11 21:49:37 +08:00
maskpp
a44956a05f fix(bug): fix data race in common/cmd module. (#220) 2023-01-11 21:05:46 +08:00
maskpp
b85b4bafc2 refactor(coordinator): refactor CollectProofs func (#211)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-01-11 13:31:42 +08:00
colin
2e3c80c580 feat(roller): add submit proof retry (#217)
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
Co-authored-by: Lawliet-Chan <1576710154@qq.com>
2023-01-11 12:30:33 +08:00
Péter Garamvölgyi
d24392feac feat(bridge): Propose batches, fetch blocks and events in parallel (#216) 2023-01-10 23:13:06 +08:00
ChuhanJin
5c6e20a774 build(Jenkins): add docker push job for tag (#214)
Co-authored-by: vincent <419436363@qq.com>
2023-01-10 14:43:20 +08:00
HAOYUatHZ
9f9e23ff0e chore(coordinator): add more logs (#177)
Co-authored-by: Lawliet-Chan <1576710154@qq.com>
2023-01-10 09:29:43 +08:00
Péter Garamvölgyi
fa93de97de feat(bridge): Relay messages and batches in parallel (#212)
Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-01-10 09:18:36 +08:00
HAOYUatHZ
deedf7a5d0 refactor(coordinator): refactor CollectProofs (#208) 2023-01-10 08:49:14 +08:00
Xi Lin
73432127cd feat(contract): only use blockHeight to verify relay message in layer1 (#209) 2023-01-09 15:03:30 +08:00
colin
a78160ddad fix(roller): avoid discarding task when connection failure (#203)
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
Co-authored-by: Lawliet-Chan <1576710154@qq.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-01-09 10:52:50 +08:00
Lawliet-Chan
fff2517a76 build(libzkp): update dependency common-rs -> scroll-zkevm (#204)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-01-09 09:32:14 +08:00
Xi Lin
eba7647e21 bugfix(bridge): add suffix to id in processingFinalization and processingCommitment (#207)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2023-01-06 14:33:21 +01:00
Xi Lin
51076d21c3 bugfix(bridge): failed to fetch rollup events on restart (#201) 2023-01-06 20:39:09 +08:00
maskpp
077ed9839a feat(bug): fix bug in l2 watcher (#199) 2023-01-05 17:32:50 +08:00
colinlyguo
bdcca55bd5 fix: roller test path (#202)
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
2023-01-05 11:41:45 +08:00
Xi Lin
20b8e2bf6c feat(contract): add finalization status api in contract (#197)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-01-04 23:26:47 +08:00
ChuhanJin
cc596c42b3 build: fix rate limit failure by using solc static bin (#200)
Co-authored-by: vincent <419436363@qq.com>
2023-01-04 21:52:10 +08:00
HAOYUatHZ
7da717b251 chore(bridge): update l2 contractEventsBlocksFetchLimit (#198) 2023-01-04 15:47:35 +08:00
HAOYUatHZ
bbdbf3995f feat(bridge): fast catch up new blocks (#184)
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
2023-01-04 15:43:14 +08:00
HAOYUatHZ
7fb8bc6e29 fix(test): fix TestCmd (#194) 2023-01-02 19:38:37 +08:00
HAOYUatHZ
b8fae294e4 build(Jenkins): use parallel testing (#169)
Co-authored-by: Scroll Dev <dev@scroll.io>
Co-authored-by: colinlyguo <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
2022-12-29 16:59:06 +08:00
maskpp
23bc381f5c fix(integration-test): fix testStartProcess (#193) 2022-12-29 16:43:06 +08:00
maskpp
b4ade85a9c feat(roller): If roller is stopped, don't need to retry again. (#191)
Co-authored-by: vincent <419436363@qq.com>
2022-12-28 16:23:29 +08:00
HAOYUatHZ
d04522027c refactor: clean up codes (#187)
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
Co-authored-by: maskpp <maskpp266@gmail.com>
2022-12-28 10:55:56 +08:00
ChuhanJin
7422bea51f build(jenkinsfile): add coverage (#181)
Co-authored-by: vincent <419436363@qq.com>
Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
2022-12-27 22:18:17 +08:00
Lawliet-Chan
abcc159390 chore(libzkp): short writings makefile in roller and coordinator (#189) 2022-12-27 18:58:09 +08:00
HAOYUatHZ
a545954dbc chore(bridge): increase blockTracesFetchLimit & `contractEventsBlock… (#185) 2022-12-26 17:55:03 +08:00
HAOYUatHZ
dc6ef83fbd chore: bump version number (#182) 2022-12-26 14:32:11 +08:00
HAOYUatHZ
e17647bc9f chore(batch_proposer): add more check for batchTxNumThreshold (#180) 2022-12-26 14:16:39 +08:00
HAOYUatHZ
feaa95aefe Revert "feat(libzkp): unwind panic (#163)" (#179) 2022-12-26 11:50:27 +08:00
HAOYUatHZ
8ad8a1b6f0 feat(batch_proposer): add batchTxNumThreshold (#178) 2022-12-26 10:17:16 +08:00
maskpp
22f6781c26 feat(Integration test): Enable run child process in integration-test test cases. (#102)
Co-authored-by: colinlyguo <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2022-12-22 22:13:04 +08:00
HAOYUatHZ
b165402e81 refactor(bridge): refactor watcher confirmation check and add more logs (#173)
Co-authored-by: Scroll Dev <dev@scroll.io>
2022-12-22 21:26:52 +08:00
HAOYUatHZ
9ee8d977cb build: revert coverage test (#176) 2022-12-22 20:10:32 +08:00
HAOYUatHZ
e1a6eb65f6 build: decrease coverage threshold to 40% (#175) 2022-12-22 15:31:05 +08:00
maskpp
9096334eab chore(database): rename block_result to block_trace (#174) 2022-12-22 13:47:59 +08:00
HAOYUatHZ
c360cf52b1 build: improve dockerfiles (#171) 2022-12-20 11:04:18 +08:00
HAOYUatHZ
00dc075d9c refactor: clean up codes (#168) 2022-12-19 20:06:00 +08:00
ChuhanJin
af77c9fa83 build(jenkinsfile): add coverage (#139)
Co-authored-by: vincent <419436363@qq.com>
2022-12-19 17:07:10 +08:00
maskpp
1c4ed0487a fix(tests): fix docker workflow for tests (#152)
Co-authored-by: colinlyguo <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2022-12-19 14:49:01 +08:00
HAOYUatHZ
e4761d9694 refactor(roller): reorg directory (#167) 2022-12-19 11:16:40 +08:00
maskpp
fbc7e03c67 fix(test): Fix weak test code in coordinator module (#165)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2022-12-16 15:06:46 +08:00
HAOYUatHZ
927011641d refactor(bridge): remove L2Watcher ReplayBlockResultByHash API (#144)
Co-authored-by: maskpp <maskpp266@gmail.com>
2022-12-16 10:45:46 +08:00
Lawliet-Chan
6eb71869e8 feat(libzkp): unwind panic (#163)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2022-12-15 20:26:38 +08:00
Xi Lin
91dcd51d55 fix(database): handle empty message array (#166)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2022-12-15 17:09:37 +08:00
Nazarii Denha
479a061ee6 perf: optimize block trace db usage (#162)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: maskpp <maskpp266@gmail.com>
Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
2022-12-14 21:43:33 +08:00
ChuhanJin
f9c6fcf801 build(Jenkinsfile): move "docker cleanup" to an individual following task (#141)
Co-authored-by: vincent <419436363@qq.com>
2022-12-14 15:26:57 +08:00
HAOYUatHZ
d463c6fb72 feat(batch_proposer): add batch_proposer configuration (#129)
Co-authored-by: maskpp <maskpp266@gmail.com>
2022-12-14 10:45:41 +08:00
maskpp
5a51d7d3f0 refactor: fix lint error (#161) 2022-12-13 21:23:24 +08:00
HAOYUatHZ
450ff4b51f build: bump version to prealpha-v7.3 (#160) 2022-12-13 13:55:51 +08:00
colinlyguo
807845a633 build(jenkins): parallel build binaries & docker images (#156)
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2022-12-13 13:32:48 +08:00
maskpp
87fc9a8cc9 feat(l2geth version): bump l2geth version (#159) 2022-12-13 13:15:20 +08:00
Lawliet-Chan
a63fe6924a feat: Add version into coordinator (#157) 2022-12-13 11:58:13 +08:00
maskpp
41414e36bc feat(ws log): Add err log when client stop the ws connection. (#158) 2022-12-13 11:47:31 +08:00
HAOYUatHZ
d2a30504e9 build(github CI): replace actions/cache@v2 with `Swatinem/rust-cach… (#155) 2022-12-13 11:00:01 +08:00
141 changed files with 5051 additions and 2085 deletions

.github/pull_request_template.md

@@ -0,0 +1,7 @@
1. Purpose or design rationale of this PR
2. Does this PR involve a new deployment, and involve a new git tag & docker image tag? If so, has `tag` in `common/version.go` been updated?
3. Is this PR a breaking change? If so, have it been attached a `breaking-change` label?


@@ -31,7 +31,11 @@ jobs:
       - name: Checkout code
         uses: actions/checkout@v2
       - name: Install Solc
-        uses: pontem-network/get-solc@master
+        uses: supplypike/setup-bin@v3
+        with:
+          uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.16/solc-static-linux'
+          name: 'solc'
+          version: '0.8.16'
       - name: Install Geth Tools
         uses: gacts/install-geth-tools@v1
       - name: Lint


@@ -35,21 +35,10 @@ jobs:
           go-version: 1.18.x
       - name: Checkout code
         uses: actions/checkout@v2
-      - name: Cache cargo registry
-        uses: actions/cache@v2
+      - name: Cache cargo
+        uses: Swatinem/rust-cache@v2
         with:
-          path: ~/.cargo/registry
-          key: ${{ runner.os }}-roller-cargo-registry-${{ hashFiles('common/libzkp/impl/Cargo.lock') }}
-      - name: Cache cargo index
-        uses: actions/cache@v2
-        with:
-          path: ~/.cargo/git
-          key: ${{ runner.os }}-roller-cargo-index-${{ hashFiles('common/libzkp/impl/Cargo.lock') }}
-      - name: Cache cargo target
-        uses: actions/cache@v2
-        with:
-          path: /home/runner/work/scroll/scroll/common/libzkp/impl/target
-          key: ${{ runner.os }}-roller-cargo-build-target-${{ hashFiles('common/libzkp/impl/Cargo.lock') }}
+          workspaces: "common/libzkp/impl -> target"
       - name: Lint
         run: |
           rm -rf $HOME/.cache/golangci-lint


@@ -35,21 +35,6 @@ jobs:
           go-version: 1.18.x
       - name: Checkout code
         uses: actions/checkout@v2
-      - name: Cache cargo registry
-        uses: actions/cache@v2
-        with:
-          path: ~/.cargo/registry
-          key: ${{ runner.os }}-roller-cargo-registry-${{ hashFiles('common/libzkp/impl/Cargo.lock') }}
-      - name: Cache cargo index
-        uses: actions/cache@v2
-        with:
-          path: ~/.cargo/git
-          key: ${{ runner.os }}-roller-cargo-index-${{ hashFiles('common/libzkp/impl/Cargo.lock') }}
-      - name: Cache cargo target
-        uses: actions/cache@v2
-        with:
-          path: /home/runner/work/scroll/scroll/common/libzkp/impl/target
-          key: ${{ runner.os }}-roller-cargo-build-target-${{ hashFiles('common/libzkp/impl/Cargo.lock') }}
       - name: Lint
         run: |
           rm -rf $HOME/.cache/golangci-lint


@@ -35,21 +35,10 @@ jobs:
           go-version: 1.18.x
       - name: Checkout code
         uses: actions/checkout@v2
-      - name: Cache cargo registry
-        uses: actions/cache@v2
+      - name: Cache cargo
+        uses: Swatinem/rust-cache@v2
         with:
-          path: ~/.cargo/registry
-          key: ${{ runner.os }}-roller-cargo-registry-${{ hashFiles('common/libzkp/impl/Cargo.lock') }}
-      - name: Cache cargo index
-        uses: actions/cache@v2
-        with:
-          path: ~/.cargo/git
-          key: ${{ runner.os }}-roller-cargo-index-${{ hashFiles('common/libzkp/impl/Cargo.lock') }}
-      - name: Cache cargo target
-        uses: actions/cache@v2
-        with:
-          path: /home/runner/work/scroll/scroll/common/libzkp/impl/target
-          key: ${{ runner.os }}-roller-cargo-build-target-${{ hashFiles('common/libzkp/impl/Cargo.lock') }}
+          workspaces: "common/libzkp/impl -> target"
       - name: Test
         run: |
           make roller

Jenkinsfile

@@ -8,6 +8,7 @@ pipeline {
     }
     tools {
         go 'go-1.18'
+        nodejs "nodejs"
     }
     environment {
         GO111MODULE = 'on'
@@ -16,69 +17,103 @@ pipeline {
     }
     stages {
         stage('Build') {
-            when {
-                anyOf {
-                    changeset "Jenkinsfile"
-                    changeset "build/**"
-                    changeset "go.work**"
-                    changeset "bridge/**"
-                    changeset "coordinator/**"
-                    changeset "common/**"
-                    changeset "database/**"
+            parallel {
+                stage('Build Prerequisite') {
+                    steps {
+                        sh 'make dev_docker'
+                        sh 'make -C bridge mock_abi'
+                    }
+                }
+                stage('Check Bridge Compilation') {
+                    steps {
+                        sh 'make -C bridge bridge'
+                    }
+                }
+                stage('Check Coordinator Compilation') {
+                    steps {
+                        sh 'export PATH=/home/ubuntu/go/bin:$PATH'
+                        sh 'make -C coordinator coordinator'
+                    }
+                }
+                stage('Check Database Compilation') {
+                    steps {
+                        sh 'make -C database db_cli'
+                    }
+                }
+                stage('Check Bridge Docker Build') {
+                    steps {
+                        sh 'make -C bridge docker'
+                    }
+                }
+                stage('Check Coordinator Docker Build') {
+                    steps {
+                        sh 'make -C coordinator docker'
+                    }
+                }
+                stage('Check Database Docker Build') {
+                    steps {
+                        sh 'make -C database docker'
+                    }
+                }
+            }
-            steps {
-                // start to build project
-                sh '''#!/bin/bash
-                    set -e
-                    export PATH=/home/ubuntu/go/bin:$PATH
-                    make dev_docker
-                    make -C bridge mock_abi
-                    # check compilation
-                    make -C bridge bridge
-                    make -C coordinator coordinator
-                    make -C database db_cli
-                    # check docker build
-                    make -C bridge docker
-                    make -C coordinator docker
-                    make -C database docker
-                '''
-            }
         }
-        stage('Test') {
-            when {
-                anyOf {
-                    changeset "Jenkinsfile"
-                    changeset "build/**"
-                    changeset "go.work**"
-                    changeset "bridge/**"
-                    changeset "coordinator/**"
-                    changeset "common/**"
-                    changeset "database/**"
+        stage('Parallel Test') {
+            parallel{
+                stage('Test bridge package') {
+                    steps {
+                        sh 'go test -v -race -coverprofile=coverage.bridge.txt -covermode=atomic -p 1 scroll-tech/bridge/...'
+                    }
+                }
+                stage('Test common package') {
+                    steps {
+                        sh 'go test -v -race -coverprofile=coverage.common.txt -covermode=atomic -p 1 scroll-tech/common/...'
+                    }
+                }
+                stage('Test coordinator package') {
+                    steps {
+                        sh 'go test -v -race -coverprofile=coverage.coordinator.txt -covermode=atomic -p 1 scroll-tech/coordinator/...'
+                    }
+                }
+                stage('Test database package') {
+                    steps {
+                        sh 'go test -v -race -coverprofile=coverage.db.txt -covermode=atomic -p 1 scroll-tech/database/...'
+                    }
+                }
+                stage('Integration test') {
+                    steps {
+                        sh 'go test -v -race -tags="mock_prover mock_verifier" -coverprofile=coverage.integration.txt -covermode=atomic -p 1 scroll-tech/integration-test/...'
+                    }
+                }
+                stage('Race test bridge package') {
+                    steps {
+                        sh "cd bridge && go test -v -race -coverprofile=coverage.txt -covermode=atomic \$(go list ./... | grep -v 'database\\|common\\|l1\\|l2\\|coordinator')"
+                    }
+                }
+                stage('Race test coordinator package') {
+                    steps {
+                        sh "cd coordinator && go test -v -race -coverprofile=coverage.txt -covermode=atomic \$(go list ./... | grep -v 'database\\|common\\|l1\\|l2\\|coordinator')"
+                    }
+                }
+                stage('Race test database package') {
+                    steps {
+                        sh "cd database && go test -v -race -coverprofile=coverage.txt -covermode=atomic \$(go list ./... | grep -v 'database\\|common\\|l1\\|l2\\|coordinator')"
+                    }
+                }
+            }
         }
+        stage('Compare Coverage') {
+            steps {
                 sh "docker ps -aq | xargs -r docker stop"
                 sh "docker container prune -f"
                 catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
-                    sh '''
-                    go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 scroll-tech/database/...
-                    go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 scroll-tech/bridge/...
-                    go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 scroll-tech/common/...
-                    go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 scroll-tech/coordinator/...
-                    cd ..
-                    '''
-                    script {
-                        for (i in ['bridge', 'coordinator', 'database']) {
-                            sh "cd $i && go test -v -race -coverprofile=coverage.txt -covermode=atomic \$(go list ./... | grep -v 'database\\|l2\\|l1\\|common\\|coordinator')"
-                        }
-                    }
                 }
+                sh "./build/post-test-report-coverage.sh"
+                script {
+                    currentBuild.result = 'SUCCESS'
+                }
+                step([$class: 'CompareCoverageAction', publishResultAs: 'Comment', scmVars: [GIT_URL: env.GIT_URL]])
             }
         }
     }
-    post {
-        always {
+    post {
+        always {
+            publishCoverage adapters: [coberturaReportAdapter(path: 'cobertura.xml', thresholds: [[thresholdTarget: 'Aggregated Report', unhealthyThreshold: 40.0]])], checksName: '', sourceFileResolver: sourceFiles('NEVER_STORE')
             cleanWs()
             slackSend(message: "${JOB_BASE_NAME} ${GIT_COMMIT} #${BUILD_NUMBER} deploy ${currentBuild.result}")
         }


@@ -14,11 +14,11 @@ lint: ## The code's format and security checks.
 update: ## update dependencies
	go work sync
-	cd $(PWD)/bridge/ && go mod tidy
-	cd $(PWD)/common/ && go mod tidy
-	cd $(PWD)/coordinator/ && go mod tidy
-	cd $(PWD)/database/ && go mod tidy
-	cd $(PWD)/roller/ && go mod tidy
+	cd $(PWD)/bridge/ && go get -u github.com/scroll-tech/go-ethereum@staging && go mod tidy
+	cd $(PWD)/common/ && go get -u github.com/scroll-tech/go-ethereum@staging && go mod tidy
+	cd $(PWD)/coordinator/ && go get -u github.com/scroll-tech/go-ethereum@staging && go mod tidy
+	cd $(PWD)/database/ && go get -u github.com/scroll-tech/go-ethereum@staging && go mod tidy
+	cd $(PWD)/roller/ && go get -u github.com/scroll-tech/go-ethereum@staging && go mod tidy
	goimports -local $(PWD)/bridge/ -w .
	goimports -local $(PWD)/common/ -w .
	goimports -local $(PWD)/coordinator/ -w .

@@ -5,7 +5,8 @@ IMAGE_VERSION=latest
 REPO_ROOT_DIR=./..
 mock_abi:
-	go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol mock_bridge/Mock_Bridge.sol --pkg mock_bridge --out mock_bridge/Mock_Bridge.go
+	go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol mock_bridge/MockBridgeL1.sol --pkg mock_bridge --out mock_bridge/MockBridgeL1.go
+	go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol mock_bridge/MockBridgeL2.sol --pkg mock_bridge --out mock_bridge/MockBridgeL2.go
 bridge: ## Builds the Bridge instance.
	go build -o $(PWD)/build/bin/bridge ./cmd


@@ -22,16 +22,6 @@ make clean
 make bridge
 ```
-## DB config
-* db settings in config
-```bash
-# DB_DSN: db data source name
-export DB_DSN="postgres://admin:123456@localhost/test_db?sslmode=disable"
-# DB_DRIVER: db driver name
-export DB_DRIVER="postgres"
-```
 ## Start
 * use default ports and config.json


@@ -9,20 +9,20 @@ import (
 )
 const (
-	// SENT_MESSAGE_EVENT_SIGNATURE = keccak256("SentMessage(address,address,uint256,uint256,uint256,bytes,uint256,uint256)")
-	SENT_MESSAGE_EVENT_SIGNATURE = "806b28931bc6fbe6c146babfb83d5c2b47e971edb43b4566f010577a0ee7d9f4"
+	// SentMessageEventSignature = keccak256("SentMessage(address,address,uint256,uint256,uint256,bytes,uint256,uint256)")
+	SentMessageEventSignature = "806b28931bc6fbe6c146babfb83d5c2b47e971edb43b4566f010577a0ee7d9f4"
-	// RELAYED_MESSAGE_EVENT_SIGNATURE = keccak256("RelayedMessage(bytes32)")
-	RELAYED_MESSAGE_EVENT_SIGNATURE = "4641df4a962071e12719d8c8c8e5ac7fc4d97b927346a3d7a335b1f7517e133c"
+	// RelayedMessageEventSignature = keccak256("RelayedMessage(bytes32)")
+	RelayedMessageEventSignature = "4641df4a962071e12719d8c8c8e5ac7fc4d97b927346a3d7a335b1f7517e133c"
-	// FAILED_RELAYED_MESSAGE_EVENT_SIGNATURE = keccak256("FailedRelayedMessage(bytes32)")
-	FAILED_RELAYED_MESSAGE_EVENT_SIGNATURE = "99d0e048484baa1b1540b1367cb128acd7ab2946d1ed91ec10e3c85e4bf51b8f"
+	// FailedRelayedMessageEventSignature = keccak256("FailedRelayedMessage(bytes32)")
+	FailedRelayedMessageEventSignature = "99d0e048484baa1b1540b1367cb128acd7ab2946d1ed91ec10e3c85e4bf51b8f"
-	// COMMIT_BATCH_EVENT_SIGNATURE = keccak256("CommitBatch(bytes32,bytes32,uint256,bytes32)")
-	COMMIT_BATCH_EVENT_SIGNATURE = "a26d4bd91c4c2eff3b1bf542129607d782506fc1950acfab1472a20d28c06596"
+	// CommitBatchEventSignature = keccak256("CommitBatch(bytes32,bytes32,uint256,bytes32)")
+	CommitBatchEventSignature = "a26d4bd91c4c2eff3b1bf542129607d782506fc1950acfab1472a20d28c06596"
-	// FINALIZED_BATCH_EVENT_SIGNATURE = keccak256("FinalizeBatch(bytes32,bytes32,uint256,bytes32)")
-	FINALIZED_BATCH_EVENT_SIGNATURE = "e20f311a96205960de4d2bb351f7729e5136fa36ae64d7f736c67ddc4ca4cd4b"
+	// FinalizedBatchEventSignature = keccak256("FinalizeBatch(bytes32,bytes32,uint256,bytes32)")
+	FinalizedBatchEventSignature = "e20f311a96205960de4d2bb351f7729e5136fa36ae64d7f736c67ddc4ca4cd4b"
 )
 var (
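Note: as the comments state, each constant above is just the keccak256 hash of the event's canonical signature string. A minimal standalone sketch (not part of this diff) that recomputes one of them with the go-ethereum crypto package:

```go
package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/crypto"
)

func main() {
	// Recompute the SentMessage topic and compare it against the
	// SentMessageEventSignature constant above (which omits the 0x prefix).
	sig := "SentMessage(address,address,uint256,uint256,uint256,bytes,uint256,uint256)"
	fmt.Println(crypto.Keccak256Hash([]byte(sig)).Hex())
	// expected: 0x806b28931bc6fbe6c146babfb83d5c2b47e971edb43b4566f010577a0ee7d9f4
}
```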

bridge/cmd/app/app.go

@@ -0,0 +1,125 @@
package app
import (
"fmt"
"os"
"os/signal"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/database"
"scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/bridge/config"
"scroll-tech/bridge/l1"
"scroll-tech/bridge/l2"
)
var (
app *cli.App
)
func init() {
// Set up Bridge app info.
app = cli.NewApp()
app.Action = action
app.Name = "bridge"
app.Usage = "The Scroll Bridge"
app.Version = version.Version
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Flags = append(app.Flags, apiFlags...)
app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx)
}
// Register `bridge-test` app for integration-test.
utils.RegisterSimulation(app, "bridge-test")
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
// init db connection
var ormFactory database.OrmFactory
if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
log.Crit("failed to init db connection", "err", err)
}
var (
l1Backend *l1.Backend
l2Backend *l2.Backend
)
// @todo change nil to actual client after https://scroll-tech/bridge/pull/40 merged
l1Backend, err = l1.New(ctx.Context, cfg.L1Config, ormFactory)
if err != nil {
return err
}
l2Backend, err = l2.New(ctx.Context, cfg.L2Config, ormFactory)
if err != nil {
return err
}
defer func() {
l1Backend.Stop()
l2Backend.Stop()
err = ormFactory.Close()
if err != nil {
log.Error("can not close ormFactory", "error", err)
}
}()
// Start all modules.
if err = l1Backend.Start(); err != nil {
log.Crit("couldn't start l1 backend", "error", err)
}
if err = l2Backend.Start(); err != nil {
log.Crit("couldn't start l2 backend", "error", err)
}
// Register api and start rpc service.
if ctx.Bool(httpEnabledFlag.Name) {
handler, addr, err := utils.StartHTTPEndpoint(
fmt.Sprintf(
"%s:%d",
ctx.String(httpListenAddrFlag.Name),
ctx.Int(httpPortFlag.Name)),
l2Backend.APIs())
if err != nil {
log.Crit("Could not start RPC api", "error", err)
}
defer func() {
_ = handler.Shutdown(ctx.Context)
log.Info("HTTP endpoint closed", "url", fmt.Sprintf("http://%v/", addr))
}()
log.Info("HTTP endpoint opened", "url", fmt.Sprintf("http://%v/", addr))
}
log.Info("Start bridge successfully")
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
return nil
}
// Run run bridge cmd instance.
func Run() {
// Run the bridge.
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}


@@ -0,0 +1,19 @@
package app

import (
	"fmt"
	"testing"
	"time"

	"scroll-tech/common/cmd"
	"scroll-tech/common/version"
)

func TestRunBridge(t *testing.T) {
	bridge := cmd.NewCmd(t, "bridge-test", "--version")
	defer bridge.WaitExit()

	// wait result
	bridge.ExpectWithTimeout(true, time.Second*3, fmt.Sprintf("bridge version %s", version.Version))
	bridge.RunApp(nil)
}


@@ -1,6 +1,8 @@
-package main
+package app
-import "github.com/urfave/cli/v2"
+import (
+	"github.com/urfave/cli/v2"
+)
 var (
 	apiFlags = []cli.Flag{
@@ -26,22 +28,4 @@ var (
 			Usage: "HTTP-RPC server listening port",
 			Value: 8290,
 		}
-	l1Flags = []cli.Flag{
-		&l1UrlFlag,
-	}
-	l1UrlFlag = cli.StringFlag{
-		Name:  "l1.endpoint",
-		Usage: "The endpoint connect to l1chain node",
-		Value: "https://goerli.infura.io/v3/9aa3d95b3bc440fa88ea12eaa4456161",
-	}
-	l2Flags = []cli.Flag{
-		&l2UrlFlag,
-	}
-	l2UrlFlag = cli.StringFlag{
-		Name:  "l2.endpoint",
-		Usage: "The endpoint connect to l2chain node",
-		Value: "/var/lib/jenkins/workspace/SequencerPipeline/MyPrivateNetwork/geth.ipc",
-	}
 )


@@ -1,125 +1,7 @@
 package main
-import (
-	"fmt"
-	"os"
-	"os/signal"
-	"github.com/scroll-tech/go-ethereum/log"
-	"github.com/urfave/cli/v2"
-	"scroll-tech/database"
-	"scroll-tech/common/utils"
-	"scroll-tech/common/version"
-	"scroll-tech/bridge/config"
-	"scroll-tech/bridge/l1"
-	"scroll-tech/bridge/l2"
-)
+import "scroll-tech/bridge/cmd/app"
 func main() {
-	// Set up Bridge app info.
-	app := cli.NewApp()
-	app.Action = action
-	app.Name = "bridge"
-	app.Usage = "The Scroll Bridge"
-	app.Version = version.Version
-	app.Flags = append(app.Flags, utils.CommonFlags...)
-	app.Flags = append(app.Flags, apiFlags...)
-	app.Flags = append(app.Flags, l1Flags...)
-	app.Flags = append(app.Flags, l2Flags...)
-	app.Before = func(ctx *cli.Context) error {
-		return utils.LogSetup(ctx)
-	}
-	// Run the sequencer.
-	if err := app.Run(os.Args); err != nil {
-		_, _ = fmt.Fprintln(os.Stderr, err)
-		os.Exit(1)
-	}
-}
-func applyConfig(ctx *cli.Context, cfg *config.Config) {
-	if ctx.IsSet(l1UrlFlag.Name) {
-		cfg.L1Config.Endpoint = ctx.String(l1UrlFlag.Name)
-	}
-	if ctx.IsSet(l2UrlFlag.Name) {
-		cfg.L2Config.Endpoint = ctx.String(l2UrlFlag.Name)
-	}
-}
-func action(ctx *cli.Context) error {
-	// Load config file.
-	cfgFile := ctx.String(utils.ConfigFileFlag.Name)
-	cfg, err := config.NewConfig(cfgFile)
-	if err != nil {
-		log.Crit("failed to load config file", "config file", cfgFile, "error", err)
-	}
-	applyConfig(ctx, cfg)
-	// init db connection
-	var ormFactory database.OrmFactory
-	if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
-		log.Crit("failed to init db connection", "err", err)
-	}
-	var (
-		l1Backend *l1.Backend
-		l2Backend *l2.Backend
-	)
-	// @todo change nil to actual client after https://scroll-tech/bridge/pull/40 merged
-	l1Backend, err = l1.New(ctx.Context, cfg.L1Config, ormFactory)
-	if err != nil {
-		return err
-	}
-	l2Backend, err = l2.New(ctx.Context, cfg.L2Config, ormFactory)
-	if err != nil {
-		return err
-	}
-	defer func() {
-		l1Backend.Stop()
-		l2Backend.Stop()
-		err = ormFactory.Close()
-		if err != nil {
-			log.Error("can not close ormFactory", err)
-		}
-	}()
-	// Start all modules.
-	if err = l1Backend.Start(); err != nil {
-		log.Crit("couldn't start l1 backend", "error", err)
-	}
-	if err = l2Backend.Start(); err != nil {
-		log.Crit("couldn't start l2 backend", "error", err)
-	}
-	apis := l2Backend.APIs()
-	// Register api and start rpc service.
-	if ctx.Bool(httpEnabledFlag.Name) {
-		handler, addr, err := utils.StartHTTPEndpoint(
-			fmt.Sprintf(
-				"%s:%d",
-				ctx.String(httpListenAddrFlag.Name),
-				ctx.Int(httpPortFlag.Name)),
-			apis)
-		if err != nil {
-			log.Crit("Could not start HTTP api", "error", err)
-		}
-		defer func() {
-			_ = handler.Shutdown(ctx.Context)
-			log.Info("HTTP endpoint closed", "url", fmt.Sprintf("http://%v/", addr))
-		}()
-		log.Info("HTTP endpoint opened", "url", fmt.Sprintf("http://%v/", addr))
-	}
-	// Catch CTRL-C to ensure a graceful shutdown.
-	interrupt := make(chan os.Signal, 1)
-	signal.Notify(interrupt, os.Interrupt)
-	// Wait until the interrupt signal is received from an OS signal.
-	<-interrupt
-	return nil
+	app.Run()
 }


@@ -3,6 +3,7 @@
     "confirmations": 6,
     "endpoint": "https://goerli.infura.io/v3/9aa3d95b3bc440fa88ea12eaa4456161",
     "l1_messenger_address": "0x0000000000000000000000000000000000000000",
+    "rollup_contract_address": "0x0000000000000000000000000000000000000000",
     "start_height": 0,
     "relayer_config": {
       "messenger_contract_address": "0x0000000000000000000000000000000000000000",
@@ -24,16 +25,11 @@
   },
   "l2_config": {
     "confirmations": 1,
-    "proof_generation_freq": 1,
-    "skipped_opcodes": [
-      "CREATE2",
-      "DELEGATECALL"
-    ],
     "endpoint": "/var/lib/jenkins/workspace/SequencerPipeline/MyPrivateNetwork/geth.ipc",
     "l2_messenger_address": "0x0000000000000000000000000000000000000000",
     "relayer_config": {
-      "messenger_contract_address": "0x0000000000000000000000000000000000000000",
       "rollup_contract_address": "0x0000000000000000000000000000000000000000",
+      "messenger_contract_address": "0x0000000000000000000000000000000000000000",
       "sender_config": {
         "endpoint": "https://goerli.infura.io/v3/9aa3d95b3bc440fa88ea12eaa4456161",
         "check_pending_time": 10,
@@ -48,9 +44,20 @@
         "message_sender_private_keys": [
           "1212121212121212121212121212121212121212121212121212121212121212"
         ],
-        "roller_sender_private_keys": [
+        "rollup_sender_private_keys": [
          "1212121212121212121212121212121212121212121212121212121212121212"
        ]
      },
+      "batch_proposer_config": {
+        "proof_generation_freq": 1,
+        "batch_gas_threshold": 3000000,
+        "batch_tx_num_threshold": 135,
+        "batch_time_sec": 300,
+        "batch_blocks_limit": 100,
+        "skipped_opcodes": [
+          "CREATE2",
+          "DELEGATECALL"
+        ]
+      }
     },
     "db_config": {


@@ -1,153 +1,18 @@
 package config
 import (
-	"crypto/ecdsa"
 	"encoding/json"
-	"fmt"
-	"math/big"
 	"os"
 	"path/filepath"
-	"github.com/scroll-tech/go-ethereum/common"
-	"github.com/scroll-tech/go-ethereum/crypto"
 	"scroll-tech/common/utils"
-	db_config "scroll-tech/database"
+	"scroll-tech/database"
 )
-// SenderConfig The config for transaction sender
-type SenderConfig struct {
-	// The RPC endpoint of the ethereum or scroll public node.
-	Endpoint string `json:"endpoint"`
-	// The time to trigger check pending txs in sender.
-	CheckPendingTime uint64 `json:"check_pending_time"`
-	// The number of blocks to wait to escalate increase gas price of the transaction.
-	EscalateBlocks uint64 `json:"escalate_blocks"`
-	// The gap number between a block be confirmed and the latest block.
-	Confirmations uint64 `json:"confirmations"`
-	// The numerator of gas price escalate multiple.
-	EscalateMultipleNum uint64 `json:"escalate_multiple_num"`
-	// The denominator of gas price escalate multiple.
-	EscalateMultipleDen uint64 `json:"escalate_multiple_den"`
-	// The maximum gas price can be used to send transaction.
-	MaxGasPrice uint64 `json:"max_gas_price"`
-	// The transaction type to use: LegacyTx, AccessListTx, DynamicFeeTx
-	TxType string `json:"tx_type"`
-	// The min balance set for check and set balance for sender's accounts.
-	MinBalance *big.Int `json:"min_balance,omitempty"`
-}
-// L1Config loads l1eth configuration items.
-type L1Config struct {
-	// Confirmations block height confirmations number.
-	Confirmations uint64 `json:"confirmations"`
-	// l1 eth node url.
-	Endpoint string `json:"endpoint"`
-	// The start height to sync event from layer 1
-	StartHeight uint64 `json:"start_height"`
-	// The messenger contract address deployed on layer 1 chain.
-	L1MessengerAddress common.Address `json:"l1_messenger_address,omitempty"`
-	// The relayer config
-	RelayerConfig *RelayerConfig `json:"relayer_config"`
-}
-// L2Config loads l2geth configuration items.
-type L2Config struct {
-	// Confirmations block height confirmations number.
-	Confirmations uint64 `json:"confirmations"`
-	// l2geth node url.
-	Endpoint string `json:"endpoint"`
-	// The messenger contract address deployed on layer 2 chain.
-	L2MessengerAddress common.Address `json:"l2_messenger_address,omitempty"`
-	// Proof generation frequency, generating proof every k blocks
-	ProofGenerationFreq uint64 `json:"proof_generation_freq"`
-	// Skip generating proof when that opcodes appeared
-	SkippedOpcodes map[string]struct{} `json:"-"`
-	// The relayer config
-	RelayerConfig *RelayerConfig `json:"relayer_config"`
-}
-// L2ConfigAlias L2Config alias name, designed just for unmarshal.
-type L2ConfigAlias L2Config
-// UnmarshalJSON unmarshal l2config.
-func (l2 *L2Config) UnmarshalJSON(input []byte) error {
-	var jsonConfig struct {
-		L2ConfigAlias
-		SkippedOpcodes []string `json:"skipped_opcodes"`
-	}
-	if err := json.Unmarshal(input, &jsonConfig); err != nil {
-		return err
-	}
-	*l2 = L2Config(jsonConfig.L2ConfigAlias)
-	l2.SkippedOpcodes = make(map[string]struct{}, len(jsonConfig.SkippedOpcodes))
-	for _, opcode := range jsonConfig.SkippedOpcodes {
-		l2.SkippedOpcodes[opcode] = struct{}{}
-	}
-	if 0 == l2.ProofGenerationFreq {
-		l2.ProofGenerationFreq = 1
-	}
-	return nil
-}
-// RelayerConfig loads relayer configuration items.
-// What we need to pay attention to is that
-// `MessageSenderPrivateKeys` and `RollupSenderPrivateKeys` cannot have common private keys.
-type RelayerConfig struct {
-	// RollupContractAddress store the rollup contract address.
-	RollupContractAddress common.Address `json:"rollup_contract_address,omitempty"`
-	// MessengerContractAddress store the scroll messenger contract address.
-	MessengerContractAddress common.Address `json:"messenger_contract_address"`
-	// sender config
-	SenderConfig *SenderConfig `json:"sender_config"`
-	// The private key of the relayer
-	MessageSenderPrivateKeys []*ecdsa.PrivateKey `json:"-"`
-	RollupSenderPrivateKeys []*ecdsa.PrivateKey `json:"-"`
-}
-// RelayerConfigAlias RelayerConfig alias name
-type RelayerConfigAlias RelayerConfig
-// UnmarshalJSON unmarshal relayer_config struct.
-func (r *RelayerConfig) UnmarshalJSON(input []byte) error {
-	var jsonConfig struct {
-		RelayerConfigAlias
-		// The private key of the relayer
-		MessageSenderPrivateKeys []string `json:"message_sender_private_keys"`
-		RollupSenderPrivateKeys []string `json:"roller_sender_private_keys,omitempty"`
-	}
-	if err := json.Unmarshal(input, &jsonConfig); err != nil {
-		return err
-	}
-	// Get messenger private key list.
-	*r = RelayerConfig(jsonConfig.RelayerConfigAlias)
-	for _, privStr := range jsonConfig.MessageSenderPrivateKeys {
-		priv, err := crypto.ToECDSA(common.FromHex(privStr))
-		if err != nil {
-			return fmt.Errorf("incorrect private_key_list format, err: %v", err)
-		}
-		r.MessageSenderPrivateKeys = append(r.MessageSenderPrivateKeys, priv)
-	}
-	// Get rollup private key
-	for _, privStr := range jsonConfig.RollupSenderPrivateKeys {
-		priv, err := crypto.ToECDSA(common.FromHex(privStr))
-		if err != nil {
-			return fmt.Errorf("incorrect roller_private_key format, err: %v", err)
-		}
-		r.RollupSenderPrivateKeys = append(r.RollupSenderPrivateKeys, priv)
-	}
-	return nil
-}
 // Config load configuration items.
 type Config struct {
-	L1Config *L1Config           `json:"l1_config"`
-	L2Config *L2Config           `json:"l2_config"`
-	DBConfig *db_config.DBConfig `json:"db_config"`
+	L1Config *L1Config          `json:"l1_config"`
+	L2Config *L2Config          `json:"l2_config"`
+	DBConfig *database.DBConfig `json:"db_config"`
 }
 // NewConfig returns a new instance of Config.
@@ -163,9 +28,5 @@ func NewConfig(file string) (*Config, error) {
 		return nil, err
 	}
-	// cover value by env fields
-	cfg.DBConfig.DSN = utils.GetEnvWithDefault("DB_DSN", cfg.DBConfig.DSN)
-	cfg.DBConfig.DriverName = utils.GetEnvWithDefault("DB_DRIVER", cfg.DBConfig.DriverName)
 	return cfg, nil
 }


@@ -1,7 +1,11 @@
 package config_test
 import (
+	"encoding/json"
+	"fmt"
+	"os"
 	"testing"
+	"time"
 	"github.com/stretchr/testify/assert"
@@ -11,9 +15,25 @@ import (
 func TestConfig(t *testing.T) {
 	cfg, err := config.NewConfig("../config.json")
 	assert.True(t, assert.NoError(t, err), "failed to load config")
-	assert.True(t, len(cfg.L2Config.SkippedOpcodes) == 2)
-	assert.True(t, cfg.L2Config.ProofGenerationFreq == 1)
+	assert.True(t, len(cfg.L2Config.BatchProposerConfig.SkippedOpcodes) > 0)
+	assert.True(t, len(cfg.L1Config.RelayerConfig.MessageSenderPrivateKeys) > 0)
+	assert.True(t, len(cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys) > 0)
+	assert.True(t, len(cfg.L2Config.RelayerConfig.RollupSenderPrivateKeys) > 0)
+	data, err := json.Marshal(cfg)
+	assert.NoError(t, err)
+	tmpJosn := fmt.Sprintf("/tmp/%d_bridge_config.json", time.Now().Nanosecond())
+	defer func() { _ = os.Remove(tmpJosn) }()
+	assert.NoError(t, os.WriteFile(tmpJosn, data, 0644))
+	cfg2, err := config.NewConfig(tmpJosn)
+	assert.NoError(t, err)
+	assert.Equal(t, cfg.L1Config, cfg2.L1Config)
+	assert.Equal(t, cfg.L2Config, cfg2.L2Config)
+	assert.Equal(t, cfg.DBConfig, cfg2.DBConfig)
 }


@@ -0,0 +1,19 @@
package config

import "github.com/scroll-tech/go-ethereum/common"

// L1Config loads l1eth configuration items.
type L1Config struct {
	// Confirmations block height confirmations number.
	Confirmations uint64 `json:"confirmations"`
	// l1 eth node url.
	Endpoint string `json:"endpoint"`
	// The start height to sync event from layer 1
	StartHeight uint64 `json:"start_height"`
	// The messenger contract address deployed on layer 1 chain.
	L1MessengerAddress common.Address `json:"l1_messenger_address"`
	// The rollup contract address deployed on layer 1 chain.
	RollupContractAddress common.Address `json:"rollup_contract_address"`
	// The relayer config
	RelayerConfig *RelayerConfig `json:"relayer_config"`
}


@@ -0,0 +1,73 @@
package config

import (
	"encoding/json"

	"github.com/scroll-tech/go-ethereum/common"
)

// L2Config loads l2geth configuration items.
type L2Config struct {
	// Confirmations block height confirmations number.
	Confirmations uint64 `json:"confirmations"`
	// l2geth node url.
	Endpoint string `json:"endpoint"`
	// The messenger contract address deployed on layer 2 chain.
	L2MessengerAddress common.Address `json:"l2_messenger_address,omitempty"`
	// The relayer config
	RelayerConfig *RelayerConfig `json:"relayer_config"`
	// The batch_proposer config
	BatchProposerConfig *BatchProposerConfig `json:"batch_proposer_config"`
}

// BatchProposerConfig loads l2watcher batch_proposer configuration items.
type BatchProposerConfig struct {
	// Proof generation frequency, generating proof every k blocks
	ProofGenerationFreq uint64 `json:"proof_generation_freq"`
	// Txnum threshold in a batch
	BatchTxNumThreshold uint64 `json:"batch_tx_num_threshold"`
	// Gas threshold in a batch
	BatchGasThreshold uint64 `json:"batch_gas_threshold"`
	// Time waited to generate a batch even if gas_threshold not met
	BatchTimeSec uint64 `json:"batch_time_sec"`
	// Max number of blocks in a batch
	BatchBlocksLimit uint64 `json:"batch_blocks_limit"`
	// Skip generating proof when that opcodes appeared
	SkippedOpcodes map[string]struct{} `json:"-"`
}

// batchProposerConfigAlias RelayerConfig alias name
type batchProposerConfigAlias BatchProposerConfig

// UnmarshalJSON unmarshal BatchProposerConfig config struct.
func (b *BatchProposerConfig) UnmarshalJSON(input []byte) error {
	var jsonConfig struct {
		batchProposerConfigAlias
		SkippedOpcodes []string `json:"skipped_opcodes,omitempty"`
	}
	if err := json.Unmarshal(input, &jsonConfig); err != nil {
		return err
	}
	*b = BatchProposerConfig(jsonConfig.batchProposerConfigAlias)
	b.SkippedOpcodes = make(map[string]struct{}, len(jsonConfig.SkippedOpcodes))
	for _, opcode := range jsonConfig.SkippedOpcodes {
		b.SkippedOpcodes[opcode] = struct{}{}
	}
	return nil
}

// MarshalJSON marshal BatchProposerConfig in order to transfer skipOpcodes.
func (b *BatchProposerConfig) MarshalJSON() ([]byte, error) {
	jsonConfig := struct {
		batchProposerConfigAlias
		SkippedOpcodes []string `json:"skipped_opcodes,omitempty"`
	}{batchProposerConfigAlias(*b), nil}
	// Load skipOpcodes.
	for op := range b.SkippedOpcodes {
		jsonConfig.SkippedOpcodes = append(jsonConfig.SkippedOpcodes, op)
	}
	return json.Marshal(&jsonConfig)
}
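The batchProposerConfigAlias type above is the standard Go trick for custom (un)marshalers: the alias shares the struct's fields but none of its methods, so calling json.Marshal or json.Unmarshal on it does not re-enter MarshalJSON/UnmarshalJSON. A minimal self-contained sketch of the pattern (hypothetical names, not code from this repo):

```go
package main

import (
	"encoding/json"
	"fmt"
)

type Settings struct {
	Name string `json:"name"`
}

// settingsAlias has Settings' fields but none of its methods, so
// marshaling it does not recurse back into Settings.MarshalJSON.
type settingsAlias Settings

func (s *Settings) MarshalJSON() ([]byte, error) {
	// json.Marshal(s) here would call this method again and never return;
	// converting to the alias type breaks the cycle.
	return json.Marshal(settingsAlias(*s))
}

func main() {
	out, _ := json.Marshal(&Settings{Name: "batch_proposer"})
	fmt.Println(string(out)) // {"name":"batch_proposer"}
}
```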


@@ -0,0 +1,107 @@
package config

import (
	"crypto/ecdsa"
	"encoding/json"
	"fmt"
	"math/big"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/crypto"
)

// SenderConfig The config for transaction sender
type SenderConfig struct {
	// The RPC endpoint of the ethereum or scroll public node.
	Endpoint string `json:"endpoint"`
	// The time to trigger check pending txs in sender.
	CheckPendingTime uint64 `json:"check_pending_time"`
	// The number of blocks to wait to escalate increase gas price of the transaction.
	EscalateBlocks uint64 `json:"escalate_blocks"`
	// The gap number between a block be confirmed and the latest block.
	Confirmations uint64 `json:"confirmations"`
	// The numerator of gas price escalate multiple.
	EscalateMultipleNum uint64 `json:"escalate_multiple_num"`
	// The denominator of gas price escalate multiple.
	EscalateMultipleDen uint64 `json:"escalate_multiple_den"`
	// The maximum gas price can be used to send transaction.
	MaxGasPrice uint64 `json:"max_gas_price"`
	// The transaction type to use: LegacyTx, AccessListTx, DynamicFeeTx
	TxType string `json:"tx_type"`
	// The min balance set for check and set balance for sender's accounts.
	MinBalance *big.Int `json:"min_balance,omitempty"`
}

// RelayerConfig loads relayer configuration items.
// What we need to pay attention to is that
// `MessageSenderPrivateKeys` and `RollupSenderPrivateKeys` cannot have common private keys.
type RelayerConfig struct {
	// RollupContractAddress store the rollup contract address.
	RollupContractAddress common.Address `json:"rollup_contract_address,omitempty"`
	// MessengerContractAddress store the scroll messenger contract address.
	MessengerContractAddress common.Address `json:"messenger_contract_address"`
	// sender config
	SenderConfig *SenderConfig `json:"sender_config"`
	// The private key of the relayer
	MessageSenderPrivateKeys []*ecdsa.PrivateKey `json:"-"`
	RollupSenderPrivateKeys  []*ecdsa.PrivateKey `json:"-"`
}

// relayerConfigAlias RelayerConfig alias name
type relayerConfigAlias RelayerConfig

// UnmarshalJSON unmarshal relayer_config struct.
func (r *RelayerConfig) UnmarshalJSON(input []byte) error {
	var jsonConfig struct {
		relayerConfigAlias
		// The private key of the relayer
		MessageSenderPrivateKeys []string `json:"message_sender_private_keys"`
		RollupSenderPrivateKeys  []string `json:"rollup_sender_private_keys,omitempty"`
	}
	if err := json.Unmarshal(input, &jsonConfig); err != nil {
		return err
	}
	// Get messenger private key list.
	*r = RelayerConfig(jsonConfig.relayerConfigAlias)
	for _, privStr := range jsonConfig.MessageSenderPrivateKeys {
		priv, err := crypto.ToECDSA(common.FromHex(privStr))
		if err != nil {
			return fmt.Errorf("incorrect private_key_list format, err: %v", err)
		}
		r.MessageSenderPrivateKeys = append(r.MessageSenderPrivateKeys, priv)
	}
	// Get rollup private key
	for _, privStr := range jsonConfig.RollupSenderPrivateKeys {
		priv, err := crypto.ToECDSA(common.FromHex(privStr))
		if err != nil {
			return fmt.Errorf("incorrect roller_private_key format, err: %v", err)
		}
		r.RollupSenderPrivateKeys = append(r.RollupSenderPrivateKeys, priv)
	}
	return nil
}

// MarshalJSON marshal RelayerConfig config, transfer private keys.
func (r *RelayerConfig) MarshalJSON() ([]byte, error) {
	jsonConfig := struct {
		relayerConfigAlias
		// The private key of the relayer
		MessageSenderPrivateKeys []string `json:"message_sender_private_keys"`
		RollupSenderPrivateKeys  []string `json:"rollup_sender_private_keys,omitempty"`
	}{relayerConfigAlias(*r), nil, nil}
	// Transfer message sender private keys to hex type.
	for _, priv := range r.MessageSenderPrivateKeys {
		jsonConfig.MessageSenderPrivateKeys = append(jsonConfig.MessageSenderPrivateKeys, common.Bytes2Hex(crypto.FromECDSA(priv)))
	}
	// Transfer rollup sender private keys to hex type.
	for _, priv := range r.RollupSenderPrivateKeys {
		jsonConfig.RollupSenderPrivateKeys = append(jsonConfig.RollupSenderPrivateKeys, common.Bytes2Hex(crypto.FromECDSA(priv)))
	}
	return json.Marshal(&jsonConfig)
}
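Reading the SenderConfig comments, EscalateMultipleNum/EscalateMultipleDen describe a rational gas-price bump (for example 11/10 for +10% per escalation), capped by MaxGasPrice. A sketch of that arithmetic under those assumed semantics (the helper below is illustrative, not the repo's sender implementation):

```go
package main

import (
	"fmt"
	"math/big"
)

// escalate bumps a stuck transaction's gas price by num/den and clamps
// the result at maxGasPrice. Semantics inferred from SenderConfig above.
func escalate(gasPrice *big.Int, num, den, maxGasPrice uint64) *big.Int {
	bumped := new(big.Int).Mul(gasPrice, new(big.Int).SetUint64(num))
	bumped.Div(bumped, new(big.Int).SetUint64(den))
	if limit := new(big.Int).SetUint64(maxGasPrice); bumped.Cmp(limit) > 0 {
		bumped.Set(limit)
	}
	return bumped
}

func main() {
	// escalate_multiple_num=11, escalate_multiple_den=10 -> +10% per step
	fmt.Println(escalate(big.NewInt(2_000_000_000), 11, 10, 10_000_000_000)) // 2200000000
}
```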


@@ -5,10 +5,11 @@ go 1.18
 require (
 	github.com/iden3/go-iden3-crypto v0.0.13
 	github.com/orcaman/concurrent-map v1.0.0
-	github.com/scroll-tech/go-ethereum v1.10.14-0.20221125025612-4ea77a7577c6
+	github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81
 	github.com/stretchr/testify v1.8.0
 	github.com/urfave/cli/v2 v2.10.2
 	golang.org/x/sync v0.1.0
+	modernc.org/mathutil v1.4.1
 )
 require (
@@ -17,7 +18,7 @@ require (
 	github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/deckarep/golang-set v1.8.0 // indirect
-	github.com/ethereum/go-ethereum v1.10.23 // indirect
+	github.com/ethereum/go-ethereum v1.10.26 // indirect
 	github.com/go-ole/go-ole v1.2.6 // indirect
 	github.com/go-stack/stack v1.8.0 // indirect
 	github.com/google/uuid v1.3.0 // indirect
@@ -27,18 +28,18 @@ require (
 	github.com/mattn/go-isatty v0.0.14 // indirect
 	github.com/mitchellh/mapstructure v1.5.0 // indirect
 	github.com/pmezard/go-difflib v1.0.0 // indirect
+	github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect
 	github.com/rjeczalik/notify v0.9.1 // indirect
 	github.com/rogpeppe/go-internal v1.8.1 // indirect
 	github.com/russross/blackfriday/v2 v2.1.0 // indirect
-	github.com/scroll-tech/zktrie v0.3.0 // indirect
+	github.com/scroll-tech/zktrie v0.3.1 // indirect
 	github.com/shirou/gopsutil v3.21.11+incompatible // indirect
 	github.com/tklauser/go-sysconf v0.3.10 // indirect
 	github.com/tklauser/numcpus v0.4.0 // indirect
 	github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
 	github.com/yusufpapurcu/wmi v1.2.2 // indirect
-	golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90 // indirect
-	golang.org/x/sys v0.2.0 // indirect
-	golang.org/x/text v0.4.0 // indirect
+	golang.org/x/crypto v0.5.0 // indirect
+	golang.org/x/sys v0.4.0 // indirect
 	gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
 	gopkg.in/yaml.v3 v3.0.1 // indirect
 )


@@ -111,8 +111,8 @@ github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaB
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
 github.com/ethereum/go-ethereum v1.10.13/go.mod h1:W3yfrFyL9C1pHcwY5hmRHVDaorTiQxhYBkKyu5mEDHw=
-github.com/ethereum/go-ethereum v1.10.23 h1:Xk8XAT4/UuqcjMLIMF+7imjkg32kfVFKoeyQDaO2yWM=
-github.com/ethereum/go-ethereum v1.10.23/go.mod h1:EYFyF19u3ezGLD4RqOkLq+ZCXzYbLoNDdZlMt7kyKFg=
+github.com/ethereum/go-ethereum v1.10.26 h1:i/7d9RBBwiXCEuyduBQzJw/mKmnvzsN14jqBmytw72s=
+github.com/ethereum/go-ethereum v1.10.26/go.mod h1:EYFyF19u3ezGLD4RqOkLq+ZCXzYbLoNDdZlMt7kyKFg=
 github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
 github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
 github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
@@ -336,6 +336,8 @@ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R
 github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
 github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
 github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
+github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk=
+github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
 github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc=
 github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
 github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
@@ -348,10 +350,11 @@ github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
 github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
 github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
 github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20221125025612-4ea77a7577c6 h1:o0Gq2d8nus6x82apluA5RJwjlOca7LIlpAfxlyvQvxs=
-github.com/scroll-tech/go-ethereum v1.10.14-0.20221125025612-4ea77a7577c6/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
-github.com/scroll-tech/zktrie v0.3.0 h1:c0GRNELUyAtyuiwllQKT1XjNbs7NRGfguKouiyLfFNY=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81 h1:Gm18RZ9WTR2Dupumr60E2m1Noe+l9/lITt6iRyxxZoc=
+github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
 github.com/scroll-tech/zktrie v0.3.0/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
+github.com/scroll-tech/zktrie v0.3.1 h1:HlR+fMBdjXX1/7cUMqpUgGEhGy/3vN1JpwQ0ovg/Ys8=
+github.com/scroll-tech/zktrie v0.3.1/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
 github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
 github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
 github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
@@ -422,8 +425,8 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh
 golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
 golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
 golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
-golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90 h1:Y/gsMcFOcR+6S6f3YeMKl5g+dZMEWqcz5Czj/GWYbkM=
-golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
+golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
+golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
 golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
 golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -538,8 +541,8 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A=
-golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
+golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -551,8 +554,7 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
 golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
-golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg=
-golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
+golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
 golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -662,5 +664,7 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh
 honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
 honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las=
+modernc.org/mathutil v1.4.1 h1:ij3fYGe8zBF4Vu+g0oT7mB06r8sqGWKuJu1yXeR4by8=
+modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
 rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
 rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=


@@ -31,7 +31,7 @@ func New(ctx context.Context, cfg *config.L1Config, orm database.OrmFactory) (*B
 		return nil, err
 	}
-	watcher := NewWatcher(ctx, client, cfg.StartHeight, cfg.Confirmations, cfg.L1MessengerAddress, cfg.RelayerConfig.RollupContractAddress, orm)
+	watcher := NewWatcher(ctx, client, cfg.StartHeight, cfg.Confirmations, cfg.L1MessengerAddress, cfg.RollupContractAddress, orm)
 	return &Backend{
 		cfg: cfg,
bridge/l1/l1_test.go

@@ -0,0 +1,65 @@
package l1

import (
	"testing"

	"github.com/stretchr/testify/assert"

	"scroll-tech/common/docker"

	"scroll-tech/bridge/config"
)

var (
	// config
	cfg *config.Config

	// docker consider handler.
	l1gethImg docker.ImgInstance
	l2gethImg docker.ImgInstance
	dbImg     docker.ImgInstance
)

func setupEnv(t *testing.T) {
	// Load config.
	var err error
	cfg, err = config.NewConfig("../config.json")
	assert.NoError(t, err)

	// Create l1geth container.
	l1gethImg = docker.NewTestL1Docker(t)
	cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = l1gethImg.Endpoint()
	cfg.L1Config.Endpoint = l1gethImg.Endpoint()

	// Create l2geth container.
	l2gethImg = docker.NewTestL2Docker(t)
	cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = l2gethImg.Endpoint()
	cfg.L2Config.Endpoint = l2gethImg.Endpoint()

	// Create db container.
	dbImg = docker.NewTestDBDocker(t, cfg.DBConfig.DriverName)
	cfg.DBConfig.DSN = dbImg.Endpoint()
}

func free(t *testing.T) {
	if dbImg != nil {
		assert.NoError(t, dbImg.Stop())
	}
	if l1gethImg != nil {
		assert.NoError(t, l1gethImg.Stop())
	}
	if l2gethImg != nil {
		assert.NoError(t, l2gethImg.Stop())
	}
}

func TestL1(t *testing.T) {
	setupEnv(t)

	t.Run("testCreateNewL1Relayer", testCreateNewL1Relayer)
	t.Run("testStartWatcher", testStartWatcher)

	t.Cleanup(func() {
		free(t)
	})
}


@@ -10,6 +10,7 @@ import (
 	"github.com/scroll-tech/go-ethereum/accounts/abi"
 	"github.com/scroll-tech/go-ethereum/common"
+	"github.com/scroll-tech/go-ethereum/crypto"
 	"github.com/scroll-tech/go-ethereum/ethclient"
 	"github.com/scroll-tech/go-ethereum/log"
@@ -51,7 +52,8 @@ func NewLayer1Relayer(ctx context.Context, ethClient *ethclient.Client, l1Confir
 	sender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKeys)
 	if err != nil {
-		log.Error("new sender failed", "err", err)
+		addr := crypto.PubkeyToAddress(cfg.MessageSenderPrivateKeys[0].PublicKey)
+		log.Error("new sender failed", "main address", addr.String(), "err", err)
 		return nil, err
 	}


@@ -1,4 +1,4 @@
package l1_test
package l1
import (
"context"
@@ -9,37 +9,21 @@ import (
"scroll-tech/database/migrate"
"scroll-tech/bridge/config"
"scroll-tech/bridge/l1"
"scroll-tech/database"
"scroll-tech/common/docker"
)
// TestCreateNewRelayer test create new relayer instance and stop
func TestCreateNewL1Relayer(t *testing.T) {
cfg, err := config.NewConfig("../config.json")
assert.NoError(t, err)
l1docker := docker.NewTestL1Docker(t)
defer l1docker.Stop()
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = l1docker.Endpoint()
cfg.L1Config.Endpoint = l1docker.Endpoint()
client, err := ethclient.Dial(l1docker.Endpoint())
assert.NoError(t, err)
dbImg := docker.NewTestDBDocker(t, cfg.DBConfig.DriverName)
defer dbImg.Stop()
cfg.DBConfig.DSN = dbImg.Endpoint()
// testCreateNewL1Relayer tests creating a new relayer instance and stopping it
func testCreateNewL1Relayer(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
relayer, err := l1.NewLayer1Relayer(context.Background(), client, 1, db, cfg.L2Config.RelayerConfig)
client, err := ethclient.Dial(l1gethImg.Endpoint())
assert.NoError(t, err)
relayer, err := NewLayer1Relayer(context.Background(), client, 1, db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
defer relayer.Stop()


@@ -5,7 +5,7 @@ import (
"math/big"
"time"
"github.com/scroll-tech/go-ethereum"
geth "github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
@@ -82,22 +82,23 @@ func NewWatcher(ctx context.Context, client *ethclient.Client, startHeight uint6
// Start the Watcher module.
func (w *Watcher) Start() {
go func() {
// triggered by a timer
ticker := time.NewTicker(10 * time.Second)
defer ticker.Stop()
for {
for ; true; <-ticker.C {
select {
case <-ticker.C:
case <-w.stop:
return
default:
blockNumber, err := w.client.BlockNumber(w.ctx)
if err != nil {
log.Error("Failed to get block number", "err", err)
continue
}
if err := w.fetchContractEvent(blockNumber); err != nil {
if err := w.FetchContractEvent(blockNumber); err != nil {
log.Error("Failed to fetch bridge contract", "err", err)
}
case <-w.stop:
return
}
}
}()
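The rewritten loop uses Go's `for ; true; <-ticker.C` idiom: the body runs once immediately and then once per tick, rather than waiting a full period before the first pass. A minimal sketch of the idiom, assuming a stop channel and a doWork callback (both illustrative):

func loop(stop <-chan struct{}, doWork func()) {
	ticker := time.NewTicker(10 * time.Second)
	defer ticker.Stop()
	for ; true; <-ticker.C {
		select {
		case <-stop:
			return // shut down promptly when asked
		default:
			doWork() // runs on the first pass, then after every tick
		}
	}
}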
@@ -111,105 +112,112 @@ func (w *Watcher) Stop() {
const contractEventsBlocksFetchLimit = int64(10)
// FetchContractEvent pulls the latest event logs from the given contract addresses and saves them in the DB
func (w *Watcher) fetchContractEvent(blockHeight uint64) error {
func (w *Watcher) FetchContractEvent(blockHeight uint64) error {
defer func() {
log.Info("l1 watcher fetchContractEvent", "w.processedMsgHeight", w.processedMsgHeight)
}()
fromBlock := int64(w.processedMsgHeight) + 1
toBlock := int64(blockHeight) - int64(w.confirmations)
if toBlock < fromBlock {
return nil
}
for from := fromBlock; from <= toBlock; from += contractEventsBlocksFetchLimit {
to := from + contractEventsBlocksFetchLimit - 1
if toBlock > fromBlock+contractEventsBlocksFetchLimit {
toBlock = fromBlock + contractEventsBlocksFetchLimit - 1
}
if to > toBlock {
to = toBlock
}
// warning: uint int conversion...
query := ethereum.FilterQuery{
FromBlock: big.NewInt(fromBlock), // inclusive
ToBlock: big.NewInt(toBlock), // inclusive
Addresses: []common.Address{
w.messengerAddress,
w.rollupAddress,
},
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 5)
query.Topics[0][0] = common.HexToHash(bridge_abi.SENT_MESSAGE_EVENT_SIGNATURE)
query.Topics[0][1] = common.HexToHash(bridge_abi.RELAYED_MESSAGE_EVENT_SIGNATURE)
query.Topics[0][2] = common.HexToHash(bridge_abi.FAILED_RELAYED_MESSAGE_EVENT_SIGNATURE)
query.Topics[0][3] = common.HexToHash(bridge_abi.COMMIT_BATCH_EVENT_SIGNATURE)
query.Topics[0][4] = common.HexToHash(bridge_abi.FINALIZED_BATCH_EVENT_SIGNATURE)
// warning: uint int conversion...
query := geth.FilterQuery{
FromBlock: big.NewInt(from), // inclusive
ToBlock: big.NewInt(to), // inclusive
Addresses: []common.Address{
w.messengerAddress,
w.rollupAddress,
},
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 5)
query.Topics[0][0] = common.HexToHash(bridge_abi.SentMessageEventSignature)
query.Topics[0][1] = common.HexToHash(bridge_abi.RelayedMessageEventSignature)
query.Topics[0][2] = common.HexToHash(bridge_abi.FailedRelayedMessageEventSignature)
query.Topics[0][3] = common.HexToHash(bridge_abi.CommitBatchEventSignature)
query.Topics[0][4] = common.HexToHash(bridge_abi.FinalizedBatchEventSignature)
logs, err := w.client.FilterLogs(w.ctx, query)
if err != nil {
log.Warn("Failed to get event logs", "err", err)
return err
}
if len(logs) == 0 {
w.processedMsgHeight = uint64(toBlock)
return nil
}
log.Info("Received new L1 messages", "fromBlock", fromBlock, "toBlock", toBlock,
"cnt", len(logs))
logs, err := w.client.FilterLogs(w.ctx, query)
if err != nil {
log.Warn("Failed to get event logs", "err", err)
return err
}
if len(logs) == 0 {
w.processedMsgHeight = uint64(to)
continue
}
log.Info("Received new L1 events", "fromBlock", from, "toBlock", to, "cnt", len(logs))
sentMessageEvents, relayedMessageEvents, rollupEvents, err := w.parseBridgeEventLogs(logs)
if err != nil {
log.Error("Failed to parse emitted events log", "err", err)
return err
}
sentMessageEvents, relayedMessageEvents, rollupEvents, err := w.parseBridgeEventLogs(logs)
if err != nil {
log.Error("Failed to parse emitted events log", "err", err)
return err
}
log.Info("L1 events types", "SentMessageCount", len(sentMessageEvents), "RelayedMessageCount", len(relayedMessageEvents), "RollupEventCount", len(rollupEvents))
// use rollup event to update rollup results db status
var batchIDs []string
for _, event := range rollupEvents {
batchIDs = append(batchIDs, event.batchID.String())
}
statuses, err := w.db.GetRollupStatusByIDList(batchIDs)
if err != nil {
log.Error("Failed to GetRollupStatusByIDList", "err", err)
return err
}
if len(statuses) != len(batchIDs) {
log.Error("RollupStatus.Length mismatch with BatchIDs.Length")
return nil
}
// use rollup events to update the rollup result status in the db
var batchIDs []string
for _, event := range rollupEvents {
batchIDs = append(batchIDs, event.batchID.String())
}
statuses, err := w.db.GetRollupStatusByIDList(batchIDs)
if err != nil {
log.Error("Failed to GetRollupStatusByIDList", "err", err)
return err
}
if len(statuses) != len(batchIDs) {
log.Error("RollupStatus.Length mismatch with BatchIDs.Length", "RollupStatus.Length", len(statuses), "BatchIDs.Length", len(batchIDs))
return nil
}
for index, event := range rollupEvents {
batchID := event.batchID.String()
status := statuses[index]
if event.status != status {
if event.status == orm.RollupFinalized {
err = w.db.UpdateFinalizeTxHashAndRollupStatus(w.ctx, batchID, event.txHash.String(), event.status)
} else if event.status == orm.RollupCommitted {
err = w.db.UpdateCommitTxHashAndRollupStatus(w.ctx, batchID, event.txHash.String(), event.status)
for index, event := range rollupEvents {
batchID := event.batchID.String()
status := statuses[index]
// only update when db status is before event status
if event.status > status {
if event.status == orm.RollupFinalized {
err = w.db.UpdateFinalizeTxHashAndRollupStatus(w.ctx, batchID, event.txHash.String(), event.status)
} else if event.status == orm.RollupCommitted {
err = w.db.UpdateCommitTxHashAndRollupStatus(w.ctx, batchID, event.txHash.String(), event.status)
}
if err != nil {
log.Error("Failed to update Rollup/Finalize TxHash and Status", "err", err)
return err
}
}
}
// Update relayed messages first to make sure we don't forget to update submitted messages,
// since we always start syncing from the latest unprocessed message.
for _, msg := range relayedMessageEvents {
if msg.isSuccessful {
// succeed
err = w.db.UpdateLayer2StatusAndLayer1Hash(w.ctx, msg.msgHash.String(), orm.MsgConfirmed, msg.txHash.String())
} else {
// failed
err = w.db.UpdateLayer2StatusAndLayer1Hash(w.ctx, msg.msgHash.String(), orm.MsgFailed, msg.txHash.String())
}
if err != nil {
log.Error("Failed to update Rollup/Finalize TxHash and Status", "err", err)
log.Error("Failed to update layer1 status and layer2 hash", "err", err)
return err
}
}
}
// Update relayed message first to make sure we don't forget to update submitted message.
// Since, we always start sync from the latest unprocessed message.
for _, msg := range relayedMessageEvents {
if msg.isSuccessful {
// succeed
err = w.db.UpdateLayer1StatusAndLayer2Hash(w.ctx, msg.msgHash.String(), orm.MsgConfirmed, msg.txHash.String())
} else {
// failed
err = w.db.UpdateLayer1StatusAndLayer2Hash(w.ctx, msg.msgHash.String(), orm.MsgFailed, msg.txHash.String())
}
if err != nil {
log.Error("Failed to update layer1 status and layer2 hash", "err", err)
if err = w.db.SaveL1Messages(w.ctx, sentMessageEvents); err != nil {
return err
}
w.processedMsgHeight = uint64(to)
}
err = w.db.SaveL1Messages(w.ctx, sentMessageEvents)
if err == nil {
w.processedMsgHeight = uint64(toBlock)
}
return err
return nil
}
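The loop above walks the confirmed window in fixed-size chunks, so a single FilterLogs call never spans more than contractEventsBlocksFetchLimit blocks, and processedMsgHeight only advances once a chunk has been parsed and persisted, so a crash re-fetches at most one chunk. The chunking arithmetic in isolation, as a sketch with illustrative names:

func walkRanges(fromBlock, toBlock int64, process func(from, to int64)) {
	const fetchLimit = int64(10) // mirrors contractEventsBlocksFetchLimit
	for from := fromBlock; from <= toBlock; from += fetchLimit {
		to := from + fetchLimit - 1
		if to > toBlock {
			to = toBlock
		}
		process(from, to) // both bounds inclusive; advance the watermark only afterwards
	}
}

Note also that rollup statuses now only move forward (`event.status > status`), so re-processing an old event can never downgrade a batch the DB already marks as committed or finalized.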
func (w *Watcher) parseBridgeEventLogs(logs []types.Log) ([]*orm.L1Message, []relayedMessage, []rollupEvent, error) {
@@ -221,7 +229,7 @@ func (w *Watcher) parseBridgeEventLogs(logs []types.Log) ([]*orm.L1Message, []re
var rollupEvents []rollupEvent
for _, vLog := range logs {
switch vLog.Topics[0] {
case common.HexToHash(bridge_abi.SENT_MESSAGE_EVENT_SIGNATURE):
case common.HexToHash(bridge_abi.SentMessageEventSignature):
event := struct {
Target common.Address
Sender common.Address
@@ -242,7 +250,7 @@ func (w *Watcher) parseBridgeEventLogs(logs []types.Log) ([]*orm.L1Message, []re
event.Target = common.HexToAddress(vLog.Topics[1].String())
l1Messages = append(l1Messages, &orm.L1Message{
Nonce: event.MessageNonce.Uint64(),
MsgHash: utils.ComputeMessageHash(event.Target, event.Sender, event.Value, event.Fee, event.Deadline, event.Message, event.MessageNonce).String(),
MsgHash: utils.ComputeMessageHash(event.Sender, event.Target, event.Value, event.Fee, event.Deadline, event.Message, event.MessageNonce).String(),
Height: vLog.BlockNumber,
Sender: event.Sender.String(),
Value: event.Value.String(),
@@ -253,7 +261,7 @@ func (w *Watcher) parseBridgeEventLogs(logs []types.Log) ([]*orm.L1Message, []re
Calldata: common.Bytes2Hex(event.Message),
Layer1Hash: vLog.TxHash.Hex(),
})
case common.HexToHash(bridge_abi.RELAYED_MESSAGE_EVENT_SIGNATURE):
case common.HexToHash(bridge_abi.RelayedMessageEventSignature):
event := struct {
MsgHash common.Hash
}{}
@@ -264,7 +272,7 @@ func (w *Watcher) parseBridgeEventLogs(logs []types.Log) ([]*orm.L1Message, []re
txHash: vLog.TxHash,
isSuccessful: true,
})
case common.HexToHash(bridge_abi.FAILED_RELAYED_MESSAGE_EVENT_SIGNATURE):
case common.HexToHash(bridge_abi.FailedRelayedMessageEventSignature):
event := struct {
MsgHash common.Hash
}{}
@@ -275,7 +283,7 @@ func (w *Watcher) parseBridgeEventLogs(logs []types.Log) ([]*orm.L1Message, []re
txHash: vLog.TxHash,
isSuccessful: false,
})
case common.HexToHash(bridge_abi.COMMIT_BATCH_EVENT_SIGNATURE):
case common.HexToHash(bridge_abi.CommitBatchEventSignature):
event := struct {
BatchID common.Hash
BatchHash common.Hash
@@ -295,7 +303,7 @@ func (w *Watcher) parseBridgeEventLogs(logs []types.Log) ([]*orm.L1Message, []re
txHash: vLog.TxHash,
status: orm.RollupCommitted,
})
case common.HexToHash(bridge_abi.FINALIZED_BATCH_EVENT_SIGNATURE):
case common.HexToHash(bridge_abi.FinalizedBatchEventSignature):
event := struct {
BatchID common.Hash
BatchHash common.Hash

bridge/l1/watcher_test.go Normal file (+29)

@@ -0,0 +1,29 @@
package l1
import (
"context"
"testing"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/stretchr/testify/assert"
"scroll-tech/database"
"scroll-tech/database/migrate"
)
func testStartWatcher(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
client, err := ethclient.Dial(l1gethImg.Endpoint())
assert.NoError(t, err)
l1Cfg := cfg.L1Config
watcher := NewWatcher(context.Background(), client, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.RelayerConfig.RollupContractAddress, db)
watcher.Start()
defer watcher.Stop()
}


@@ -3,7 +3,6 @@ package l2
import (
"context"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/rpc"
@@ -28,12 +27,12 @@ func New(ctx context.Context, cfg *config.L2Config, orm database.OrmFactory) (*B
return nil, err
}
relayer, err := NewLayer2Relayer(ctx, client, cfg.ProofGenerationFreq, cfg.SkippedOpcodes, int64(cfg.Confirmations), orm, cfg.RelayerConfig)
relayer, err := NewLayer2Relayer(ctx, orm, cfg.RelayerConfig)
if err != nil {
return nil, err
}
l2Watcher := NewL2WatcherClient(ctx, client, cfg.Confirmations, cfg.ProofGenerationFreq, cfg.SkippedOpcodes, cfg.L2MessengerAddress, orm)
l2Watcher := NewL2WatcherClient(ctx, client, cfg.Confirmations, cfg.BatchProposerConfig, cfg.L2MessengerAddress, orm)
return &Backend{
cfg: cfg,
@@ -67,8 +66,3 @@ func (l2 *Backend) APIs() []rpc.API {
},
}
}
// MockBlockTrace for test case
func (l2 *Backend) MockBlockTrace(blockTrace *types.BlockTrace) {
l2.l2Watcher.Send(blockTrace)
}


@@ -2,68 +2,104 @@ package l2
import (
"fmt"
"sync"
"time"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/database"
"scroll-tech/database/orm"
"scroll-tech/bridge/config"
)
// batch-related config
const (
batchTimeSec = uint64(5 * 60) // 5min
batchGasThreshold = uint64(3_000_000)
batchBlocksLimit = uint64(100)
)
type batchProposer struct {
mutex sync.Mutex
// TODO:
// + generate batches in parallel
// + TraceHasUnsupportedOpcodes
// + proofGenerationFreq
func (w *WatcherClient) tryProposeBatch() error {
w.bpMutex.Lock()
defer w.bpMutex.Unlock()
orm database.OrmFactory
batchTimeSec uint64
batchGasThreshold uint64
batchTxNumThreshold uint64
batchBlocksLimit uint64
proofGenerationFreq uint64
skippedOpcodes map[string]struct{}
}
func newBatchProposer(cfg *config.BatchProposerConfig, orm database.OrmFactory) *batchProposer {
return &batchProposer{
mutex: sync.Mutex{},
orm: orm,
batchTimeSec: cfg.BatchTimeSec,
batchGasThreshold: cfg.BatchGasThreshold,
batchTxNumThreshold: cfg.BatchTxNumThreshold,
batchBlocksLimit: cfg.BatchBlocksLimit,
proofGenerationFreq: cfg.ProofGenerationFreq,
skippedOpcodes: cfg.SkippedOpcodes,
}
}
func (w *batchProposer) tryProposeBatch(wg *sync.WaitGroup) {
defer wg.Done()
w.mutex.Lock()
defer w.mutex.Unlock()
blocks, err := w.orm.GetUnbatchedBlocks(
map[string]interface{}{},
fmt.Sprintf("order by number ASC LIMIT %d", batchBlocksLimit),
fmt.Sprintf("order by number ASC LIMIT %d", w.batchBlocksLimit),
)
if err != nil {
return err
log.Error("failed to get unbatched blocks", "err", err)
return
}
if len(blocks) == 0 {
return nil
return
}
if blocks[0].GasUsed > batchGasThreshold {
if blocks[0].GasUsed > w.batchGasThreshold {
log.Warn("gas overflow even for only 1 block", "height", blocks[0].Number, "gas", blocks[0].GasUsed)
return w.createBatchForBlocks(blocks[:1])
if err = w.createBatchForBlocks(blocks[:1]); err != nil {
log.Error("failed to create batch", "number", blocks[0].Number, "err", err)
}
return
}
if blocks[0].TxNum > w.batchTxNumThreshold {
log.Warn("too many txs even for only 1 block", "height", blocks[0].Number, "tx_num", blocks[0].TxNum)
if err = w.createBatchForBlocks(blocks[:1]); err != nil {
log.Error("failed to create batch", "number", blocks[0].Number, "err", err)
}
return
}
var (
length = len(blocks)
gasUsed uint64
length = len(blocks)
gasUsed, txNum uint64
)
// add blocks into the batch until we reach the gas or tx-count threshold
for i, block := range blocks {
if gasUsed+block.GasUsed > batchGasThreshold {
if (gasUsed+block.GasUsed > w.batchGasThreshold) || (txNum+block.TxNum > w.batchTxNumThreshold) {
blocks = blocks[:i]
break
}
gasUsed += block.GasUsed
txNum += block.TxNum
}
// if too little gas has been gathered, but we don't want to halt, we check the first block in the batch:
// if it's not old enough, we skip proposing the batch for now,
// otherwise we still propose a batch
if length == len(blocks) && blocks[0].BlockTimestamp+batchTimeSec > uint64(time.Now().Unix()) {
return nil
if length == len(blocks) && blocks[0].BlockTimestamp+w.batchTimeSec > uint64(time.Now().Unix()) {
return
}
return w.createBatchForBlocks(blocks)
if err = w.createBatchForBlocks(blocks); err != nil {
log.Error("failed to create batch", "from", blocks[0].Number, "to", blocks[len(blocks)-1].Number, "err", err)
}
}
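tryProposeBatch packs pending blocks greedily until the next block would push the batch past the gas or tx-count threshold; oversized head blocks are force-batched alone by the early returns above, and if nothing overflows, the batch is only proposed once its oldest block is older than batchTimeSec. The core selection policy, reduced to a sketch (the helper name is illustrative, and the force-batch case is omitted):

// pickBlocks returns the prefix of blocks worth batching, or nil to wait for more.
// blocks must be sorted by number in ascending order.
func pickBlocks(blocks []*orm.BlockInfo, gasMax, txMax, maxAgeSec, now uint64) []*orm.BlockInfo {
	var gas, txs uint64
	cut := len(blocks)
	for i, b := range blocks {
		if gas+b.GasUsed > gasMax || txs+b.TxNum > txMax {
			cut = i
			break
		}
		gas += b.GasUsed
		txs += b.TxNum
	}
	// nothing overflowed and the head block is still fresh: defer proposing
	if cut == len(blocks) && blocks[0].BlockTimestamp+maxAgeSec > now {
		return nil
	}
	return blocks[:cut]
}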
func (w *WatcherClient) createBatchForBlocks(blocks []*orm.BlockInfo) error {
func (w *batchProposer) createBatchForBlocks(blocks []*orm.BlockInfo) error {
dbTx, err := w.orm.Beginx()
if err != nil {
return err


@@ -0,0 +1,66 @@
package l2
import (
"encoding/json"
"fmt"
"math/big"
"os"
"sync"
"testing"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert"
"scroll-tech/database"
"scroll-tech/database/migrate"
"scroll-tech/bridge/config"
"scroll-tech/common/utils"
)
func testBatchProposer(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
trace2 := &types.BlockTrace{}
trace3 := &types.BlockTrace{}
data, err := os.ReadFile("../../common/testdata/blockTrace_02.json")
assert.NoError(t, err)
err = json.Unmarshal(data, trace2)
assert.NoError(t, err)
data, err = os.ReadFile("../../common/testdata/blockTrace_03.json")
assert.NoError(t, err)
err = json.Unmarshal(data, trace3)
assert.NoError(t, err)
// Insert traces into db.
assert.NoError(t, db.InsertBlockTraces([]*types.BlockTrace{trace2, trace3}))
id := utils.ComputeBatchID(trace3.Header.Hash(), trace2.Header.ParentHash, big.NewInt(1))
proposer := newBatchProposer(&config.BatchProposerConfig{
ProofGenerationFreq: 1,
BatchGasThreshold: 3000000,
BatchTxNumThreshold: 135,
BatchTimeSec: 1,
BatchBlocksLimit: 100,
}, db)
var wg sync.WaitGroup
wg.Add(1)
proposer.tryProposeBatch(&wg)
wg.Wait()
infos, err := db.GetUnbatchedBlocks(map[string]interface{}{},
fmt.Sprintf("order by number ASC LIMIT %d", 100))
assert.NoError(t, err)
assert.Equal(t, true, len(infos) == 0)
exist, err := db.BatchRecordExist(id)
assert.NoError(t, err)
assert.Equal(t, true, exist)
}


@@ -1,4 +1,4 @@
package l2_test
package l2
import (
"testing"
@@ -71,13 +71,15 @@ func TestFunction(t *testing.T) {
t.Run("TestCreateNewWatcherAndStop", testCreateNewWatcherAndStop)
t.Run("TestMonitorBridgeContract", testMonitorBridgeContract)
t.Run("TestFetchMultipleSentMessageInOneBlock", testFetchMultipleSentMessageInOneBlock)
t.Run("TestTraceHasUnsupportedOpcodes", testTraceHasUnsupportedOpcodes)
// Run l2 relayer test cases.
t.Run("TestCreateNewRelayer", testCreateNewRelayer)
t.Run("TestL2RelayerProcessSaveEvents", testL2RelayerProcessSaveEvents)
t.Run("testL2RelayerProcessPendingBatches", testL2RelayerProcessPendingBatches)
t.Run("testL2RelayerProcessCommittedBatches", testL2RelayerProcessCommittedBatches)
t.Run("testL2RelayerSkipBatches", testL2RelayerSkipBatches)
t.Run("testBatchProposer", testBatchProposer)
t.Cleanup(func() {
free(t)


@@ -3,15 +3,19 @@ package l2
import (
"context"
"errors"
"fmt"
"math/big"
"runtime"
"sync"
"time"
// not sure if this will cause problems when relaying with l1geth
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"golang.org/x/sync/errgroup"
"modernc.org/mathutil"
"scroll-tech/database"
"scroll-tech/database/orm"
@@ -29,11 +33,7 @@ import (
// Actions are triggered by new head from layer 1 geth node.
// @todo It's better to be triggered by watcher.
type Layer2Relayer struct {
ctx context.Context
client *ethclient.Client
proofGenerationFreq uint64
skippedOpcodes map[string]struct{}
ctx context.Context
db database.OrmFactory
cfg *config.RelayerConfig
@@ -46,20 +46,23 @@ type Layer2Relayer struct {
rollupCh <-chan *sender.Confirmation
l1RollupABI *abi.ABI
// a list of processing message, indexed by layer2 hash
processingMessage map[string]string
// A list of processing messages.
// key(string): confirmation ID, value(string): layer2 hash.
processingMessage sync.Map
// a list of processing batch commitment, indexed by batch id
processingCommitment map[string]string
// A list of processing batch commitments.
// key(string): confirmation ID, value(string): batch id.
processingCommitment sync.Map
// a list of processing batch finalization, indexed by batch id
processingFinalization map[string]string
// A list of processing batch finalizations.
// key(string): confirmation ID, value(string): batch id.
processingFinalization sync.Map
stopCh chan struct{}
}
// NewLayer2Relayer returns a new instance of Layer2Relayer
func NewLayer2Relayer(ctx context.Context, ethClient *ethclient.Client, proofGenFreq uint64, skippedOpcodes map[string]struct{}, l2ConfirmNum int64, db database.OrmFactory, cfg *config.RelayerConfig) (*Layer2Relayer, error) {
func NewLayer2Relayer(ctx context.Context, db database.OrmFactory, cfg *config.RelayerConfig) (*Layer2Relayer, error) {
// @todo use different sender for relayer, block commit and proof finalize
messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKeys)
if err != nil {
@@ -75,7 +78,6 @@ func NewLayer2Relayer(ctx context.Context, ethClient *ethclient.Client, proofGen
return &Layer2Relayer{
ctx: ctx,
client: ethClient,
db: db,
messageSender: messageSender,
messageCh: messageSender.ConfirmChan(),
@@ -84,25 +86,49 @@ func NewLayer2Relayer(ctx context.Context, ethClient *ethclient.Client, proofGen
rollupCh: rollupSender.ConfirmChan(),
l1RollupABI: bridge_abi.RollupMetaABI,
cfg: cfg,
proofGenerationFreq: proofGenFreq,
skippedOpcodes: skippedOpcodes,
processingMessage: map[string]string{},
processingCommitment: map[string]string{},
processingFinalization: map[string]string{},
processingMessage: sync.Map{},
processingCommitment: sync.Map{},
processingFinalization: sync.Map{},
stopCh: make(chan struct{}),
}, nil
}
const processMsgLimit = 100
// ProcessSavedEvents relays saved unprocessed cross-domain transactions to the destination chain
func (r *Layer2Relayer) ProcessSavedEvents() {
func (r *Layer2Relayer) ProcessSavedEvents(wg *sync.WaitGroup) {
defer wg.Done()
batch, err := r.db.GetLatestFinalizedBatch()
if err != nil {
log.Error("GetLatestFinalizedBatch failed", "err", err)
return
}
// msgs are sorted by nonce in increasing order
msgs, err := r.db.GetL2MessagesByStatus(orm.MsgPending)
msgs, err := r.db.GetL2Messages(
map[string]interface{}{"status": orm.MsgPending},
fmt.Sprintf("AND height<=%d ORDER BY nonce ASC LIMIT %d", batch.EndBlockNumber, processMsgLimit),
)
if err != nil {
log.Error("Failed to fetch unprocessed L2 messages", "err", err)
return
}
for _, msg := range msgs {
if err := r.processSavedEvent(msg); err != nil {
// process messages in batches
batchSize := mathutil.Min((runtime.GOMAXPROCS(0)+1)/2, r.messageSender.NumberOfAccounts())
for size := 0; len(msgs) > 0; msgs = msgs[size:] {
if size = len(msgs); size > batchSize {
size = batchSize
}
var g errgroup.Group
for _, msg := range msgs[:size] {
msg := msg
g.Go(func() error {
return r.processSavedEvent(msg, batch.Index)
})
}
if err := g.Wait(); err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
log.Error("failed to process l2 saved event", "err", err)
}
@@ -111,25 +137,13 @@ func (r *Layer2Relayer) ProcessSavedEvents() {
}
}
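The fan-out above bounds concurrency by both CPU count and the number of sender accounts, draining each batch with errgroup before starting the next, so ordering is preserved across batches. The skeleton of the pattern, assuming a hypothetical handle function:

func processInBatches(msgs []*orm.L2Message, numAccounts int, handle func(*orm.L2Message) error) error {
	batchSize := mathutil.Min((runtime.GOMAXPROCS(0)+1)/2, numAccounts)
	for size := 0; len(msgs) > 0; msgs = msgs[size:] {
		if size = len(msgs); size > batchSize {
			size = batchSize
		}
		var g errgroup.Group
		for _, msg := range msgs[:size] {
			msg := msg // capture the loop variable (required before Go 1.22)
			g.Go(func() error { return handle(msg) })
		}
		// Wait blocks until every goroutine in this batch returns
		// and yields the first non-nil error, if any.
		if err := g.Wait(); err != nil {
			return err
		}
	}
	return nil
}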
func (r *Layer2Relayer) processSavedEvent(msg *orm.L2Message) error {
// @todo add support to relay multiple messages
batch, err := r.db.GetLatestFinalizedBatch()
if err != nil {
log.Error("GetLatestFinalizedBatch failed", "err", err)
return err
}
if batch.EndBlockNumber < msg.Height {
// log.Warn("corresponding block not finalized", "status", status)
return nil
}
func (r *Layer2Relayer) processSavedEvent(msg *orm.L2Message, index uint64) error {
// @todo fetch merkle proof from l2geth
log.Info("Processing L2 Message", "msg.nonce", msg.Nonce, "msg.height", msg.Height)
proof := bridge_abi.IL1ScrollMessengerL2MessageProof{
BlockHeight: big.NewInt(int64(msg.Height)),
BatchIndex: big.NewInt(int64(batch.Index)),
BatchIndex: big.NewInt(0).SetUint64(index),
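// big.NewInt(0).SetUint64 avoids the lossy int64(...) cast for indices above math.MaxInt64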
MerkleProof: make([]byte, 0),
}
from := common.HexToAddress(msg.Sender)
@@ -167,14 +181,15 @@ func (r *Layer2Relayer) processSavedEvent(msg *orm.L2Message) error {
log.Error("UpdateLayer2StatusAndLayer1Hash failed", "msgHash", msg.MsgHash, "err", err)
return err
}
r.processingMessage[msg.MsgHash] = msg.MsgHash
r.processingMessage.Store(msg.MsgHash, msg.MsgHash)
return nil
}
// ProcessPendingBatches submits batch data to the layer 1 rollup contract
func (r *Layer2Relayer) ProcessPendingBatches() {
func (r *Layer2Relayer) ProcessPendingBatches(wg *sync.WaitGroup) {
defer wg.Done()
// batches are sorted by batch index in increasing order
batchesInDB, err := r.db.GetPendingBatches()
batchesInDB, err := r.db.GetPendingBatches(1)
if err != nil {
log.Error("Failed to fetch pending L2 batches", "err", err)
return
@@ -245,27 +260,39 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
return
}
hash, err := r.rollupSender.SendTransaction(id, &r.cfg.RollupContractAddress, big.NewInt(0), data)
txID := id + "-commit"
// add the `-commit` suffix to avoid colliding with the finalize tx ID in unit tests
hash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), data)
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
log.Error("Failed to send commitBatch tx to layer1 ", "id", id, "index", batch.Index, "err", err)
}
return
}
log.Info("commitBatch in layer1", "id", id, "index", batch.Index, "hash", hash)
log.Info("commitBatch in layer1", "batch_id", id, "index", batch.Index, "hash", hash)
// record and sync with db, @todo handle db error
err = r.db.UpdateCommitTxHashAndRollupStatus(r.ctx, id, hash.String(), orm.RollupCommitting)
if err != nil {
log.Error("UpdateCommitTxHashAndRollupStatus failed", "id", id, "index", batch.Index, "err", err)
}
r.processingCommitment[id] = id
r.processingCommitment.Store(txID, id)
}
// ProcessCommittedBatches submits proofs to the layer 1 rollup contract
func (r *Layer2Relayer) ProcessCommittedBatches() {
func (r *Layer2Relayer) ProcessCommittedBatches(wg *sync.WaitGroup) {
defer wg.Done()
// set skipped batches in a single db operation
if count, err := r.db.UpdateSkippedBatches(); err != nil {
log.Error("UpdateSkippedBatches failed", "err", err)
// continue anyway
} else if count > 0 {
log.Info("Skipping batches", "count", count)
}
// batches are sorted by batch index in increasing order
batches, err := r.db.GetCommittedBatches()
batches, err := r.db.GetCommittedBatches(1)
if err != nil {
log.Error("Failed to fetch committed L2 batches", "err", err)
return
@@ -293,6 +320,8 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
return
case orm.ProvingTaskFailed, orm.ProvingTaskSkipped:
// note: this is covered by UpdateSkippedBatches, but we keep it for completeness
if err = r.db.UpdateRollupStatus(r.ctx, id, orm.RollupFinalizationSkipped); err != nil {
log.Warn("UpdateRollupStatus failed", "id", id, "err", err)
}
@@ -337,7 +366,9 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
return
}
txHash, err := r.rollupSender.SendTransaction(id, &r.cfg.RollupContractAddress, big.NewInt(0), data)
txID := id + "-finalize"
// add the `-finalize` suffix to avoid colliding with the commit tx ID in unit tests
txHash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), data)
hash := &txHash
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
@@ -345,15 +376,15 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
}
return
}
log.Info("finalizeBatchWithProof in layer1", "id", id, "hash", hash)
log.Info("finalizeBatchWithProof in layer1", "batch_id", id, "hash", hash)
// record and sync with db, @todo handle db error
err = r.db.UpdateFinalizeTxHashAndRollupStatus(r.ctx, id, hash.String(), orm.RollupFinalizing)
if err != nil {
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "id", id, "err", err)
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_id", id, "err", err)
}
success = true
r.processingFinalization[id] = id
r.processingFinalization.Store(txID, id)
default:
log.Error("encounter unreachable case in ProcessCommittedBatches",
@@ -372,9 +403,12 @@ func (r *Layer2Relayer) Start() {
for {
select {
case <-ticker.C:
r.ProcessSavedEvents()
r.ProcessPendingBatches()
r.ProcessCommittedBatches()
var wg = sync.WaitGroup{}
wg.Add(3)
go r.ProcessSavedEvents(&wg)
go r.ProcessPendingBatches(&wg)
go r.ProcessCommittedBatches(&wg)
wg.Wait()
case confirmation := <-r.messageCh:
r.handleConfirmation(confirmation)
case confirmation := <-r.rollupCh:
@@ -399,42 +433,36 @@ func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
transactionType := "Unknown"
// check whether it is message relay transaction
if msgHash, ok := r.processingMessage[confirmation.ID]; ok {
if msgHash, ok := r.processingMessage.Load(confirmation.ID); ok {
transactionType = "MessageRelay"
// @todo handle db error
err := r.db.UpdateLayer2StatusAndLayer1Hash(r.ctx, msgHash, orm.MsgConfirmed, confirmation.TxHash.String())
err := r.db.UpdateLayer2StatusAndLayer1Hash(r.ctx, msgHash.(string), orm.MsgConfirmed, confirmation.TxHash.String())
if err != nil {
log.Warn("UpdateLayer2StatusAndLayer1Hash failed", "msgHash", msgHash, "err", err)
log.Warn("UpdateLayer2StatusAndLayer1Hash failed", "msgHash", msgHash.(string), "err", err)
}
delete(r.processingMessage, confirmation.ID)
r.processingMessage.Delete(confirmation.ID)
}
// check whether it is block commitment transaction
if batch_id, ok := r.processingCommitment[confirmation.ID]; ok {
if batchID, ok := r.processingCommitment.Load(confirmation.ID); ok {
transactionType = "BatchCommitment"
// @todo handle db error
err := r.db.UpdateCommitTxHashAndRollupStatus(r.ctx, batch_id, confirmation.TxHash.String(), orm.RollupCommitted)
err := r.db.UpdateCommitTxHashAndRollupStatus(r.ctx, batchID.(string), confirmation.TxHash.String(), orm.RollupCommitted)
if err != nil {
log.Warn("UpdateCommitTxHashAndRollupStatus failed", "batch_id", batch_id, "err", err)
log.Warn("UpdateCommitTxHashAndRollupStatus failed", "batch_id", batchID.(string), "err", err)
}
delete(r.processingCommitment, confirmation.ID)
r.processingCommitment.Delete(confirmation.ID)
}
// check whether it is proof finalization transaction
if batch_id, ok := r.processingFinalization[confirmation.ID]; ok {
if batchID, ok := r.processingFinalization.Load(confirmation.ID); ok {
transactionType = "ProofFinalization"
// @todo handle db error
err := r.db.UpdateFinalizeTxHashAndRollupStatus(r.ctx, batch_id, confirmation.TxHash.String(), orm.RollupFinalized)
err := r.db.UpdateFinalizeTxHashAndRollupStatus(r.ctx, batchID.(string), confirmation.TxHash.String(), orm.RollupFinalized)
if err != nil {
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_id", batch_id, "err", err)
}
delete(r.processingFinalization, confirmation.ID)
// try to delete block trace
err = r.db.DeleteTracesByBatchID(batch_id)
if err != nil {
log.Warn("DeleteTracesByBatchID failed", "batch_id", batch_id, "err", err)
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_id", batchID.(string), "err", err)
}
r.processingFinalization.Delete(confirmation.ID)
}
log.Info("transaction confirmed in layer1", "type", transactionType, "confirmation", confirmation)
}
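Replacing the plain maps with sync.Map makes this confirmation bookkeeping safe for concurrent writers now that the three Process* methods run in parallel goroutines, at the cost of a type assertion on every load. The access pattern in miniature (a sketch; names are illustrative):

func trackConfirmation(confirmationID, txID, batchID string) {
	var processing sync.Map         // key: confirmation ID, value: batch id
	processing.Store(txID, batchID) // producer side, e.g. after SendTransaction
	if v, ok := processing.Load(confirmationID); ok {
		id := v.(string) // sync.Map stores interface{}; assert back to string
		_ = id           // ...update the DB record for this batch...
		processing.Delete(confirmationID)
	}
}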


@@ -1,18 +1,17 @@
package l2_test
package l2
import (
"context"
"encoding/json"
"math/big"
"os"
"sync"
"testing"
"time"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert"
"scroll-tech/bridge/l2"
"scroll-tech/database"
"scroll-tech/database/migrate"
"scroll-tech/database/orm"
@@ -42,7 +41,7 @@ func testCreateNewRelayer(t *testing.T) {
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
relayer, err := l2.NewLayer2Relayer(context.Background(), l2Cli, cfg.L2Config.ProofGenerationFreq, cfg.L2Config.SkippedOpcodes, int64(cfg.L2Config.Confirmations), db, cfg.L2Config.RelayerConfig)
relayer, err := NewLayer2Relayer(context.Background(), db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
defer relayer.Stop()
@@ -57,7 +56,7 @@ func testL2RelayerProcessSaveEvents(t *testing.T) {
defer db.Close()
l2Cfg := cfg.L2Config
relayer, err := l2.NewLayer2Relayer(context.Background(), l2Cli, l2Cfg.ProofGenerationFreq, l2Cfg.SkippedOpcodes, int64(l2Cfg.Confirmations), db, l2Cfg.RelayerConfig)
relayer, err := NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer relayer.Stop()
@@ -76,7 +75,7 @@ func testL2RelayerProcessSaveEvents(t *testing.T) {
},
},
}
err = db.InsertBlockTraces(context.Background(), traces)
err = db.InsertBlockTraces(traces)
assert.NoError(t, err)
dbTx, err := db.Beginx()
@@ -96,7 +95,10 @@ func testL2RelayerProcessSaveEvents(t *testing.T) {
err = db.UpdateRollupStatus(context.Background(), batchID, orm.RollupFinalized)
assert.NoError(t, err)
relayer.ProcessSavedEvents()
var wg = sync.WaitGroup{}
wg.Add(1)
relayer.ProcessSavedEvents(&wg)
wg.Wait()
msg, err := db.GetL2MessageByNonce(templateL2Message[0].Nonce)
assert.NoError(t, err)
@@ -111,7 +113,7 @@ func testL2RelayerProcessPendingBatches(t *testing.T) {
defer db.Close()
l2Cfg := cfg.L2Config
relayer, err := l2.NewLayer2Relayer(context.Background(), l2Cli, l2Cfg.ProofGenerationFreq, l2Cfg.SkippedOpcodes, int64(l2Cfg.Confirmations), db, l2Cfg.RelayerConfig)
relayer, err := NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer relayer.Stop()
@@ -132,7 +134,7 @@ func testL2RelayerProcessPendingBatches(t *testing.T) {
assert.NoError(t, err)
traces = append(traces, blockTrace)
err = db.InsertBlockTraces(context.Background(), traces)
err = db.InsertBlockTraces(traces)
assert.NoError(t, err)
dbTx, err := db.Beginx()
@@ -152,7 +154,10 @@ func testL2RelayerProcessPendingBatches(t *testing.T) {
// err = db.UpdateRollupStatus(context.Background(), batchID, orm.RollupPending)
// assert.NoError(t, err)
relayer.ProcessPendingBatches()
var wg = sync.WaitGroup{}
wg.Add(1)
relayer.ProcessPendingBatches(&wg)
wg.Wait()
// Check if Rollup Result is changed successfully
status, err := db.GetRollupStatus(batchID)
@@ -168,7 +173,7 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
defer db.Close()
l2Cfg := cfg.L2Config
relayer, err := l2.NewLayer2Relayer(context.Background(), l2Cli, l2Cfg.ProofGenerationFreq, l2Cfg.SkippedOpcodes, int64(l2Cfg.Confirmations), db, l2Cfg.RelayerConfig)
relayer, err := NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer relayer.Stop()
@@ -189,9 +194,80 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
err = db.UpdateProvingStatus(batchID, orm.ProvingTaskVerified)
assert.NoError(t, err)
relayer.ProcessCommittedBatches()
var wg = sync.WaitGroup{}
wg.Add(1)
relayer.ProcessCommittedBatches(&wg)
wg.Wait()
status, err := db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupFinalizing, status)
}
func testL2RelayerSkipBatches(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer relayer.Stop()
createBatch := func(rollupStatus orm.RollupStatus, provingStatus orm.ProvingStatus) string {
dbTx, err := db.Beginx()
assert.NoError(t, err)
batchID, err := db.NewBatchInDBTx(dbTx, &orm.BlockInfo{}, &orm.BlockInfo{}, "0", 1, 194676) // startBlock & endBlock & parentHash & totalTxNum & totalL2Gas don't really matter here
assert.NoError(t, err)
err = dbTx.Commit()
assert.NoError(t, err)
err = db.UpdateRollupStatus(context.Background(), batchID, rollupStatus)
assert.NoError(t, err)
tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
err = db.UpdateProofByID(context.Background(), batchID, tProof, tInstanceCommitments, 100)
assert.NoError(t, err)
err = db.UpdateProvingStatus(batchID, provingStatus)
assert.NoError(t, err)
return batchID
}
skipped := []string{
createBatch(orm.RollupCommitted, orm.ProvingTaskSkipped),
createBatch(orm.RollupCommitted, orm.ProvingTaskFailed),
}
notSkipped := []string{
createBatch(orm.RollupPending, orm.ProvingTaskSkipped),
createBatch(orm.RollupCommitting, orm.ProvingTaskSkipped),
createBatch(orm.RollupFinalizing, orm.ProvingTaskSkipped),
createBatch(orm.RollupFinalized, orm.ProvingTaskSkipped),
createBatch(orm.RollupPending, orm.ProvingTaskFailed),
createBatch(orm.RollupCommitting, orm.ProvingTaskFailed),
createBatch(orm.RollupFinalizing, orm.ProvingTaskFailed),
createBatch(orm.RollupFinalized, orm.ProvingTaskFailed),
createBatch(orm.RollupCommitted, orm.ProvingTaskVerified),
}
var wg = sync.WaitGroup{}
wg.Add(1)
relayer.ProcessCommittedBatches(&wg)
wg.Wait()
for _, id := range skipped {
status, err := db.GetRollupStatus(id)
assert.NoError(t, err)
assert.Equal(t, orm.RollupFinalizationSkipped, status)
}
for _, id := range notSkipped {
status, err := db.GetRollupStatus(id)
assert.NoError(t, err)
assert.NotEqual(t, orm.RollupFinalizationSkipped, status)
}
}


@@ -8,7 +8,7 @@ import (
"sync"
"time"
"github.com/scroll-tech/go-ethereum"
geth "github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
@@ -21,6 +21,8 @@ import (
"scroll-tech/database"
"scroll-tech/database/orm"
"scroll-tech/bridge/config"
)
type relayedMessage struct {
@@ -38,11 +40,9 @@ type WatcherClient struct {
orm database.OrmFactory
confirmations uint64
proofGenerationFreq uint64
skippedOpcodes map[string]struct{}
messengerAddress common.Address
messengerABI *abi.ABI
confirmations uint64
messengerAddress common.Address
messengerABI *abi.ABI
// The height of the block up to which the watcher has retrieved event logs
processedMsgHeight uint64
@@ -50,12 +50,11 @@ type WatcherClient struct {
stopped uint64
stopCh chan struct{}
// mutex for batch proposer
bpMutex sync.Mutex
batchProposer *batchProposer
}
// NewL2WatcherClient takes an l2geth client instance and creates a new WatcherClient
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations uint64, proofGenFreq uint64, skippedOpcodes map[string]struct{}, messengerAddress common.Address, orm database.OrmFactory) *WatcherClient {
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations uint64, bpCfg *config.BatchProposerConfig, messengerAddress common.Address, orm database.OrmFactory) *WatcherClient {
savedHeight, err := orm.GetLayer2LatestWatchedHeight()
if err != nil {
log.Warn("fetch height from db failed", "err", err)
@@ -63,18 +62,16 @@ func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmat
}
return &WatcherClient{
ctx: ctx,
Client: client,
orm: orm,
processedMsgHeight: uint64(savedHeight),
confirmations: confirmations,
proofGenerationFreq: proofGenFreq,
skippedOpcodes: skippedOpcodes,
messengerAddress: messengerAddress,
messengerABI: bridge_abi.L2MessengerMetaABI,
stopCh: make(chan struct{}),
stopped: 0,
bpMutex: sync.Mutex{},
ctx: ctx,
Client: client,
orm: orm,
processedMsgHeight: uint64(savedHeight),
confirmations: confirmations,
messengerAddress: messengerAddress,
messengerABI: bridge_abi.L2MessengerMetaABI,
stopCh: make(chan struct{}),
stopped: 0,
batchProposer: newBatchProposer(bpCfg, orm),
}
}
@@ -85,63 +82,34 @@ func (w *WatcherClient) Start() {
panic("must run L2 watcher with DB")
}
lastFetchedBlock, err := w.orm.GetBlockTracesLatestHeight()
if err != nil {
panic(fmt.Sprintf("failed to GetBlockTracesLatestHeight in DB: %v", err))
}
if lastFetchedBlock < 0 {
lastFetchedBlock = 0
}
lastBlockHeightChangeTime := time.Now()
// triggered by a timer
// TODO: make it configurable
ticker := time.NewTicker(3 * time.Second)
defer ticker.Stop()
for {
for ; true; <-ticker.C {
select {
case <-ticker.C:
case <-w.stopCh:
return
default:
// get current height
number, err := w.BlockNumber(w.ctx)
if err != nil {
log.Error("failed to get_BlockNumber", "err", err)
continue
}
duration := time.Since(lastBlockHeightChangeTime)
var blockToFetch uint64
if number > uint64(lastFetchedBlock)+w.confirmations {
// latest block height changed
blockToFetch = number - w.confirmations
} else if duration.Seconds() > 60 {
// l2geth didn't produce any blocks more than 1 minute.
blockToFetch = number
}
// fetch at most `blockTracesFetchLimit=10` missing blocks
if blockToFetch > uint64(lastFetchedBlock)+blockTracesFetchLimit {
blockToFetch = uint64(lastFetchedBlock) + blockTracesFetchLimit
}
if lastFetchedBlock != int64(blockToFetch) {
lastFetchedBlock = int64(blockToFetch)
lastBlockHeightChangeTime = time.Now()
if number >= w.confirmations {
number = number - w.confirmations
} else {
number = 0
}
if err := w.tryFetchRunningMissingBlocks(w.ctx, blockToFetch); err != nil {
log.Error("failed to fetchRunningMissingBlocks", "err", err)
}
// @todo handle error
if err := w.fetchContractEvent(number); err != nil {
log.Error("failed to fetchContractEvent", "err", err)
}
if err := w.tryProposeBatch(); err != nil {
log.Error("failed to tryProposeBatch", "err", err)
}
case <-w.stopCh:
return
var wg sync.WaitGroup
wg.Add(3)
go w.tryFetchRunningMissingBlocks(w.ctx, &wg, number)
go w.FetchContractEvent(&wg, number)
go w.batchProposer.tryProposeBatch(&wg)
wg.Wait()
}
}
}()
@@ -155,109 +123,134 @@ func (w *WatcherClient) Stop() {
const blockTracesFetchLimit = uint64(10)
// try to fetch missing blocks if the DB is behind the chain
func (w *WatcherClient) tryFetchRunningMissingBlocks(ctx context.Context, backTrackFrom uint64) error {
func (w *WatcherClient) tryFetchRunningMissingBlocks(ctx context.Context, wg *sync.WaitGroup, blockHeight uint64) {
defer wg.Done()
// Get the newest block in the DB; blocks must exist in the DB at this point.
// Don't use BlockTrace.Number from the "trace" column of the "block_trace" table,
// because it might be empty if the corresponding rollup_result is finalized/finalization_skipped
heightInDB, err := w.orm.GetBlockTracesLatestHeight()
if err != nil {
return fmt.Errorf("failed to GetBlockTraces in DB: %v", err)
log.Error("failed to GetBlockTracesLatestHeight", "err", err)
return
}
backTrackTo := uint64(0)
// Can't get trace from genesis block, so the default start number is 1.
var from = uint64(1)
if heightInDB > 0 {
backTrackTo = uint64(heightInDB)
from = uint64(heightInDB) + 1
}
// fill forward from the last known height in fixed-size ranges
for ; from <= blockHeight; from += blockTracesFetchLimit {
to := from + blockTracesFetchLimit - 1
if to > blockHeight {
to = blockHeight
}
// Get block traces and insert into db.
if err = w.getAndStoreBlockTraces(ctx, from, to); err != nil {
log.Error("fail to getAndStoreBlockTraces", "from", from, "to", to, "err", err)
return
}
}
}
func (w *WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to uint64) error {
var traces []*types.BlockTrace
for number := backTrackFrom; number > backTrackTo; number-- {
for number := from; number <= to; number++ {
log.Debug("retrieving block trace", "height", number)
trace, err2 := w.GetBlockTraceByNumber(ctx, big.NewInt(int64(number)))
if err2 != nil {
return fmt.Errorf("failed to GetBlockResultByHash: %v. number: %v", err2, number)
}
log.Info("retrieved block trace", "height", trace.Header.Number, "hash", trace.Header.Hash)
log.Info("retrieved block trace", "height", trace.Header.Number, "hash", trace.Header.Hash().String())
traces = append(traces, trace)
}
if len(traces) > 0 {
if err = w.orm.InsertBlockTraces(ctx, traces); err != nil {
if err := w.orm.InsertBlockTraces(traces); err != nil {
return fmt.Errorf("failed to batch insert BlockTraces: %v", err)
}
}
return nil
}
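The same fixed-size range walk now appears in three places (L1 event fetching, L2 event fetching, and this trace backfill). Purely as an illustration, not part of the diff, the pattern could be factored into a small helper:

// forEachRange invokes fn on consecutive inclusive sub-ranges of [from, to],
// each at most limit blocks wide, stopping at the first error.
func forEachRange(from, to, limit uint64, fn func(from, to uint64) error) error {
	for ; from <= to; from += limit {
		end := from + limit - 1
		if end > to {
			end = to
		}
		if err := fn(from, end); err != nil {
			return err
		}
	}
	return nil
}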
const contractEventsBlocksFetchLimit = int64(10)
// FetchContractEvent pulls the latest event logs from the given contract address and saves them in the DB
func (w *WatcherClient) fetchContractEvent(blockHeight uint64) error {
func (w *WatcherClient) FetchContractEvent(wg *sync.WaitGroup, blockHeight uint64) {
defer wg.Done()
defer func() {
log.Info("l2 watcher fetchContractEvent", "w.processedMsgHeight", w.processedMsgHeight)
}()
fromBlock := int64(w.processedMsgHeight) + 1
toBlock := int64(blockHeight) - int64(w.confirmations)
toBlock := int64(blockHeight)
if toBlock < fromBlock {
return nil
}
for from := fromBlock; from <= toBlock; from += contractEventsBlocksFetchLimit {
to := from + contractEventsBlocksFetchLimit - 1
if toBlock > fromBlock+contractEventsBlocksFetchLimit {
toBlock = fromBlock + contractEventsBlocksFetchLimit - 1
}
// warning: uint int conversion...
query := ethereum.FilterQuery{
FromBlock: big.NewInt(fromBlock), // inclusive
ToBlock: big.NewInt(toBlock), // inclusive
Addresses: []common.Address{
w.messengerAddress,
},
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 3)
query.Topics[0][0] = common.HexToHash(bridge_abi.SENT_MESSAGE_EVENT_SIGNATURE)
query.Topics[0][1] = common.HexToHash(bridge_abi.RELAYED_MESSAGE_EVENT_SIGNATURE)
query.Topics[0][2] = common.HexToHash(bridge_abi.FAILED_RELAYED_MESSAGE_EVENT_SIGNATURE)
logs, err := w.FilterLogs(w.ctx, query)
if err != nil {
log.Error("failed to get event logs", "err", err)
return err
}
if len(logs) == 0 {
w.processedMsgHeight = uint64(toBlock)
return nil
}
log.Info("received new L2 messages", "fromBlock", fromBlock, "toBlock", toBlock,
"cnt", len(logs))
sentMessageEvents, relayedMessageEvents, err := w.parseBridgeEventLogs(logs)
if err != nil {
log.Error("failed to parse emitted event log", "err", err)
return err
}
// Update relayed messages first to make sure we don't forget to update submitted messages,
// since we always start syncing from the latest unprocessed message.
for _, msg := range relayedMessageEvents {
if msg.isSuccessful {
// succeed
err = w.orm.UpdateLayer1StatusAndLayer2Hash(w.ctx, msg.msgHash.String(), orm.MsgConfirmed, msg.txHash.String())
} else {
// failed
err = w.orm.UpdateLayer1StatusAndLayer2Hash(w.ctx, msg.msgHash.String(), orm.MsgFailed, msg.txHash.String())
if to > toBlock {
to = toBlock
}
// warning: uint int conversion...
query := geth.FilterQuery{
FromBlock: big.NewInt(from), // inclusive
ToBlock: big.NewInt(to), // inclusive
Addresses: []common.Address{
w.messengerAddress,
},
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 3)
query.Topics[0][0] = common.HexToHash(bridge_abi.SentMessageEventSignature)
query.Topics[0][1] = common.HexToHash(bridge_abi.RelayedMessageEventSignature)
query.Topics[0][2] = common.HexToHash(bridge_abi.FailedRelayedMessageEventSignature)
logs, err := w.FilterLogs(w.ctx, query)
if err != nil {
log.Error("Failed to update layer1 status and layer2 hash", "err", err)
return err
log.Error("failed to get event logs", "err", err)
return
}
}
if len(logs) == 0 {
w.processedMsgHeight = uint64(to)
continue
}
log.Info("received new L2 messages", "fromBlock", from, "toBlock", to, "cnt", len(logs))
err = w.orm.SaveL2Messages(w.ctx, sentMessageEvents)
if err == nil {
w.processedMsgHeight = uint64(toBlock)
sentMessageEvents, relayedMessageEvents, err := w.parseBridgeEventLogs(logs)
if err != nil {
log.Error("failed to parse emitted event log", "err", err)
return
}
// Update relayed messages first to make sure we don't forget to update submitted messages,
// since we always start syncing from the latest unprocessed message.
for _, msg := range relayedMessageEvents {
if msg.isSuccessful {
// succeed
err = w.orm.UpdateLayer1StatusAndLayer2Hash(w.ctx, msg.msgHash.String(), orm.MsgConfirmed, msg.txHash.String())
} else {
// failed
err = w.orm.UpdateLayer1StatusAndLayer2Hash(w.ctx, msg.msgHash.String(), orm.MsgFailed, msg.txHash.String())
}
if err != nil {
log.Error("Failed to update layer1 status and layer2 hash", "err", err)
return
}
}
if err = w.orm.SaveL2Messages(w.ctx, sentMessageEvents); err != nil {
log.Error("failed to save l2 messages", "err", err)
return
}
w.processedMsgHeight = uint64(to)
}
return err
}
func (w *WatcherClient) parseBridgeEventLogs(logs []types.Log) ([]*orm.L2Message, []relayedMessage, error) {
@@ -268,7 +261,7 @@ func (w *WatcherClient) parseBridgeEventLogs(logs []types.Log) ([]*orm.L2Message
var relayedMessages []relayedMessage
for _, vLog := range logs {
switch vLog.Topics[0] {
case common.HexToHash(bridge_abi.SENT_MESSAGE_EVENT_SIGNATURE):
case common.HexToHash(bridge_abi.SentMessageEventSignature):
event := struct {
Target common.Address
Sender common.Address
@@ -289,7 +282,7 @@ func (w *WatcherClient) parseBridgeEventLogs(logs []types.Log) ([]*orm.L2Message
event.Target = common.HexToAddress(vLog.Topics[1].String())
l2Messages = append(l2Messages, &orm.L2Message{
Nonce: event.MessageNonce.Uint64(),
MsgHash: utils.ComputeMessageHash(event.Target, event.Sender, event.Value, event.Fee, event.Deadline, event.Message, event.MessageNonce).String(),
MsgHash: utils.ComputeMessageHash(event.Sender, event.Target, event.Value, event.Fee, event.Deadline, event.Message, event.MessageNonce).String(),
Height: vLog.BlockNumber,
Sender: event.Sender.String(),
Value: event.Value.String(),
@@ -300,7 +293,7 @@ func (w *WatcherClient) parseBridgeEventLogs(logs []types.Log) ([]*orm.L2Message
Calldata: common.Bytes2Hex(event.Message),
Layer2Hash: vLog.TxHash.Hex(),
})
case common.HexToHash(bridge_abi.RELAYED_MESSAGE_EVENT_SIGNATURE):
case common.HexToHash(bridge_abi.RelayedMessageEventSignature):
event := struct {
MsgHash common.Hash
}{}
@@ -311,7 +304,7 @@ func (w *WatcherClient) parseBridgeEventLogs(logs []types.Log) ([]*orm.L2Message
txHash: vLog.TxHash,
isSuccessful: true,
})
case common.HexToHash(bridge_abi.FAILED_RELAYED_MESSAGE_EVENT_SIGNATURE):
case common.HexToHash(bridge_abi.FailedRelayedMessageEventSignature):
event := struct {
MsgHash common.Hash
}{}


@@ -1,33 +1,5 @@
package l2
import (
"errors"
"github.com/scroll-tech/go-ethereum/rpc"
)
// WatcherAPI watcher api service
type WatcherAPI interface {
ReplayBlockResultByHash(blockNrOrHash rpc.BlockNumberOrHash) (bool, error)
}
// ReplayBlockResultByHash temporary interface for easy testing.
func (r *WatcherClient) ReplayBlockResultByHash(blockNrOrHash rpc.BlockNumberOrHash) (bool, error) {
orm := r.orm
params := make(map[string]interface{})
if number, ok := blockNrOrHash.Number(); ok {
params["number"] = int64(number)
}
if hash, ok := blockNrOrHash.Hash(); ok {
params["hash"] = hash.String()
}
if len(params) == 0 {
return false, errors.New("empty params")
}
trace, err := orm.GetBlockTraces(params)
if err != nil {
return false, err
}
r.Send(&trace[0])
return true, nil
}


@@ -1,11 +1,9 @@
package l2_test
package l2
import (
"context"
"crypto/ecdsa"
"encoding/json"
"math/big"
"os"
"strconv"
"testing"
"time"
@@ -16,7 +14,7 @@ import (
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/stretchr/testify/assert"
"scroll-tech/bridge/l2"
"scroll-tech/bridge/config"
"scroll-tech/bridge/mock_bridge"
"scroll-tech/bridge/sender"
@@ -33,7 +31,7 @@ func testCreateNewWatcherAndStop(t *testing.T) {
defer l2db.Close()
l2cfg := cfg.L2Config
rc := l2.NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.ProofGenerationFreq, l2cfg.SkippedOpcodes, l2cfg.L2MessengerAddress, l2db)
rc := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.BatchProposerConfig, l2cfg.L2MessengerAddress, l2db)
rc.Start()
defer rc.Stop()
@@ -69,19 +67,22 @@ func testMonitorBridgeContract(t *testing.T) {
auth := prepareAuth(t, l2Cli, cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys[0])
// deploy mock bridge
_, tx, instance, err := mock_bridge.DeployMockBridge(auth, l2Cli)
_, tx, instance, err := mock_bridge.DeployMockBridgeL2(auth, l2Cli)
assert.NoError(t, err)
address, err := bind.WaitDeployed(context.Background(), l2Cli, tx)
assert.NoError(t, err)
rc := prepareRelayerClient(l2Cli, db, address)
rc := prepareRelayerClient(l2Cli, cfg.L2Config.BatchProposerConfig, db, address)
rc.Start()
defer rc.Stop()
// Call mock_bridge instance sendMessage to trigger emit events
toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
message := []byte("testbridgecontract")
tx, err = instance.SendMessage(auth, toAddress, message, auth.GasPrice)
fee := big.NewInt(0)
gasLimit := big.NewInt(1)
tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
assert.NoError(t, err)
receipt, err := bind.WaitMined(context.Background(), l2Cli, tx)
if receipt.Status != types.ReceiptStatusSuccessful || err != nil {
@@ -91,7 +92,7 @@ func testMonitorBridgeContract(t *testing.T) {
// extra block mined
toAddress = common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
message = []byte("testbridgecontract")
tx, err = instance.SendMessage(auth, toAddress, message, auth.GasPrice)
tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
assert.NoError(t, err)
receipt, err = bind.WaitMined(context.Background(), l2Cli, tx)
if receipt.Status != types.ReceiptStatusSuccessful || err != nil {
@@ -111,7 +112,7 @@ func testMonitorBridgeContract(t *testing.T) {
assert.NoError(t, err)
t.Log("Height in DB is", height)
assert.Greater(t, height, int64(previousHeight))
msgs, err := db.GetL2MessagesByStatus(orm.MsgPending)
msgs, err := db.GetL2Messages(map[string]interface{}{"status": orm.MsgPending})
assert.NoError(t, err)
assert.Equal(t, 2, len(msgs))
}
@@ -128,12 +129,12 @@ func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
auth := prepareAuth(t, l2Cli, cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys[0])
_, trx, instance, err := mock_bridge.DeployMockBridge(auth, l2Cli)
_, trx, instance, err := mock_bridge.DeployMockBridgeL2(auth, l2Cli)
assert.NoError(t, err)
address, err := bind.WaitDeployed(context.Background(), l2Cli, trx)
assert.NoError(t, err)
rc := prepareRelayerClient(l2Cli, db, address)
rc := prepareRelayerClient(l2Cli, cfg.L2Config.BatchProposerConfig, db, address)
rc.Start()
defer rc.Stop()
@@ -148,7 +149,9 @@ func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
auth.Nonce = big.NewInt(int64(nonce))
toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
message := []byte("testbridgecontract")
tx, err = instance.SendMessage(auth, toAddress, message, auth.GasPrice)
fee := big.NewInt(0)
gasLimit := big.NewInt(1)
tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
assert.NoError(t, err)
}
@@ -164,7 +167,9 @@ func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
auth.Nonce = big.NewInt(int64(nonce))
toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
message := []byte("testbridgecontract")
tx, err = instance.SendMessage(auth, toAddress, message, auth.GasPrice)
fee := big.NewInt(0)
gasLimit := big.NewInt(1)
tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
assert.NoError(t, err)
receipt, err = bind.WaitMined(context.Background(), l2Cli, tx)
if receipt.Status != types.ReceiptStatusSuccessful || err != nil {
@@ -179,27 +184,13 @@ func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
assert.NoError(t, err)
t.Log("LatestHeight is", height)
assert.Greater(t, height, int64(previousHeight)) // height must be greater than previousHeight because confirmations is 0
msgs, err := db.GetL2MessagesByStatus(orm.MsgPending)
msgs, err := db.GetL2Messages(map[string]interface{}{"status": orm.MsgPending})
assert.NoError(t, err)
assert.Equal(t, 5, len(msgs))
}
func testTraceHasUnsupportedOpcodes(t *testing.T) {
delegateTrace, err := os.ReadFile("../../common/testdata/blockTrace_delegate.json")
assert.NoError(t, err)
trace := &types.BlockTrace{}
assert.NoError(t, json.Unmarshal(delegateTrace, &trace))
assert.Equal(t, true, len(cfg.L2Config.SkippedOpcodes) == 2)
_, exist := cfg.L2Config.SkippedOpcodes["DELEGATECALL"]
assert.Equal(t, true, exist)
assert.True(t, l2.TraceHasUnsupportedOpcodes(cfg.L2Config.SkippedOpcodes, trace))
}
func prepareRelayerClient(l2Cli *ethclient.Client, db database.OrmFactory, contractAddr common.Address) *l2.WatcherClient {
return l2.NewL2WatcherClient(context.Background(), l2Cli, 0, 1, map[string]struct{}{}, contractAddr, db)
func prepareRelayerClient(l2Cli *ethclient.Client, bpCfg *config.BatchProposerConfig, db database.OrmFactory, contractAddr common.Address) *WatcherClient {
return NewL2WatcherClient(context.Background(), l2Cli, 0, bpCfg, contractAddr, db)
}
func prepareAuth(t *testing.T, l2Cli *ethclient.Client, privateKey *ecdsa.PrivateKey) *bind.TransactOpts {

View File

@@ -0,0 +1,187 @@
// SPDX-License-Identifier: UNLICENSED
pragma solidity ^0.8.0;
contract MockBridgeL1 {
/*********************************
* Events from L1ScrollMessenger *
*********************************/
event SentMessage(
address indexed target,
address sender,
uint256 value,
uint256 fee,
uint256 deadline,
bytes message,
uint256 messageNonce,
uint256 gasLimit
);
event MessageDropped(bytes32 indexed msgHash);
event RelayedMessage(bytes32 indexed msgHash);
event FailedRelayedMessage(bytes32 indexed msgHash);
/************************
* Events from ZKRollup *
************************/
/// @notice Emitted when a new batch is committed.
/// @param _batchId The identification of the batch.
/// @param _batchHash The hash of the batch.
/// @param _batchIndex The index of the batch.
/// @param _parentHash The hash of the parent batch.
event CommitBatch(bytes32 indexed _batchId, bytes32 _batchHash, uint256 _batchIndex, bytes32 _parentHash);
/// @notice Emitted when a batch is reverted.
/// @param _batchId The identification of the batch.
event RevertBatch(bytes32 indexed _batchId);
/// @notice Emitted when a batch is finalized.
/// @param _batchId The identification of the batch.
/// @param _batchHash The hash of the batch.
/// @param _batchIndex The index of the batch.
/// @param _parentHash The hash of the parent batch.
event FinalizeBatch(bytes32 indexed _batchId, bytes32 _batchHash, uint256 _batchIndex, bytes32 _parentHash);
/***********
* Structs *
***********/
struct L2MessageProof {
uint256 batchIndex;
uint256 blockHeight;
bytes merkleProof;
}
/// @dev The transaction struct
struct Layer2Transaction {
address caller;
uint64 nonce;
address target;
uint64 gas;
uint256 gasPrice;
uint256 value;
bytes data;
// signature
uint256 r;
uint256 s;
uint64 v;
}
/// @dev The block header struct
struct Layer2BlockHeader {
bytes32 blockHash;
bytes32 parentHash;
uint256 baseFee;
bytes32 stateRoot;
uint64 blockHeight;
uint64 gasUsed;
uint64 timestamp;
bytes extraData;
Layer2Transaction[] txs;
}
/// @dev The batch struct; the batch hash is always the hash of the last block in `blocks`.
struct Layer2Batch {
uint64 batchIndex;
// The hash of the last block in the parent batch
bytes32 parentHash;
Layer2BlockHeader[] blocks;
}
struct Layer2BatchStored {
bytes32 batchHash;
bytes32 parentHash;
uint64 batchIndex;
bool verified;
}
/*************
* Variables *
*************/
/// @notice Message nonce, used to avoid replay attacks.
uint256 public messageNonce;
/// @notice Mapping from batch id to batch struct.
mapping(bytes32 => Layer2BatchStored) public batches;
/************************************
* Functions from L1ScrollMessenger *
************************************/
function sendMessage(
address _to,
uint256 _fee,
bytes memory _message,
uint256 _gasLimit
) external payable {
// solhint-disable-next-line not-rely-on-time
uint256 _deadline = block.timestamp + 1 days;
uint256 _value;
unchecked {
_value = msg.value - _fee;
}
uint256 _nonce = messageNonce;
emit SentMessage(_to, msg.sender, _value, _fee, _deadline, _message, _nonce, _gasLimit);
messageNonce += 1;
}
function relayMessageWithProof(
address _from,
address _to,
uint256 _value,
uint256 _fee,
uint256 _deadline,
uint256 _nonce,
bytes memory _message,
L2MessageProof memory
) external {
bytes32 _msghash = keccak256(abi.encodePacked(_from, _to, _value, _fee, _deadline, _nonce, _message));
emit RelayedMessage(_msghash);
}
/***************************
* Functions from ZKRollup *
***************************/
function commitBatch(Layer2Batch memory _batch) external {
bytes32 _batchHash = _batch.blocks[_batch.blocks.length - 1].blockHash;
bytes32 _batchId = _computeBatchId(_batchHash, _batch.parentHash, _batch.batchIndex);
Layer2BatchStored storage _batchStored = batches[_batchId];
_batchStored.batchHash = _batchHash;
_batchStored.parentHash = _batch.parentHash;
_batchStored.batchIndex = _batch.batchIndex;
emit CommitBatch(_batchId, _batchHash, _batch.batchIndex, _batch.parentHash);
}
function revertBatch(bytes32 _batchId) external {
emit RevertBatch(_batchId);
}
function finalizeBatchWithProof(
bytes32 _batchId,
uint256[] memory,
uint256[] memory
) external {
Layer2BatchStored storage _batch = batches[_batchId];
uint256 _batchIndex = _batch.batchIndex;
emit FinalizeBatch(_batchId, _batch.batchHash, _batchIndex, _batch.parentHash);
}
/// @dev Internal function to compute a unique batch id for mapping.
/// @param _batchHash The hash of the batch.
/// @param _parentHash The hash of the parent batch.
/// @param _batchIndex The index of the batch.
/// @return Return the computed batch id.
function _computeBatchId(
bytes32 _batchHash,
bytes32 _parentHash,
uint256 _batchIndex
) internal pure returns (bytes32) {
return keccak256(abi.encode(_batchHash, _parentHash, _batchIndex));
}
}
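A rough off-chain equivalent of `_computeBatchId`, assuming go-ethereum primitives (the helper name and package placement are illustrative, not part of this changeset). Since `abi.encode` of two `bytes32` values and a `uint256` is just the three 32-byte words concatenated, hashing that buffer reproduces the on-chain id:
package utils

import (
	"math/big"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/common/math"
	"github.com/scroll-tech/go-ethereum/crypto"
)

// ComputeBatchID mirrors MockBridgeL1._computeBatchId:
// keccak256(abi.encode(batchHash, parentHash, batchIndex)).
func ComputeBatchID(batchHash, parentHash common.Hash, batchIndex *big.Int) common.Hash {
	var buf []byte
	buf = append(buf, batchHash.Bytes()...)
	buf = append(buf, parentHash.Bytes()...)
	// copy first: math.U256Bytes mutates its argument
	buf = append(buf, math.U256Bytes(new(big.Int).Set(batchIndex))...)
	return common.BytesToHash(crypto.Keccak256(buf))
}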

View File

@@ -0,0 +1,67 @@
// SPDX-License-Identifier: UNLICENSED
pragma solidity ^0.8.0;
contract MockBridgeL2 {
/*********************************
* Events from L2ScrollMessenger *
*********************************/
event SentMessage(
address indexed target,
address sender,
uint256 value,
uint256 fee,
uint256 deadline,
bytes message,
uint256 messageNonce,
uint256 gasLimit
);
event MessageDropped(bytes32 indexed msgHash);
event RelayedMessage(bytes32 indexed msgHash);
event FailedRelayedMessage(bytes32 indexed msgHash);
/*************
* Variables *
*************/
/// @notice Message nonce, used to avoid replay attacks.
uint256 public messageNonce;
/************************************
* Functions from L2ScrollMessenger *
************************************/
function sendMessage(
address _to,
uint256 _fee,
bytes memory _message,
uint256 _gasLimit
) external payable {
// solhint-disable-next-line not-rely-on-time
uint256 _deadline = block.timestamp + 1 days;
uint256 _nonce = messageNonce;
uint256 _value;
unchecked {
_value = msg.value - _fee;
}
bytes32 _msghash = keccak256(abi.encodePacked(msg.sender, _to, _value, _fee, _deadline, _nonce, _message));
emit SentMessage(_to, msg.sender, _value, _fee, _deadline, _message, _nonce, _gasLimit);
messageNonce = _nonce + 1;
}
function relayMessageWithProof(
address _from,
address _to,
uint256 _value,
uint256 _fee,
uint256 _deadline,
uint256 _nonce,
bytes memory _message
) external {
bytes32 _msghash = keccak256(abi.encodePacked(_from, _to, _value, _fee, _deadline, _nonce, _message));
emit RelayedMessage(_msghash);
}
}

View File

@@ -1,42 +0,0 @@
//SPDX-License-Identifier: UNLICENSED
pragma solidity ^0.8.0;
contract Mock_Bridge {
event SentMessage(
address indexed target,
address sender,
uint256 value,
uint256 fee,
uint256 deadline,
bytes message,
uint256 messageNonce,
uint256 gasLimit
);
/// @notice Message nonce, used to avoid relay attack.
uint256 public messageNonce;
function sendMessage(
address _to,
bytes memory _message,
uint256 _gasLimit
) external payable {
// solhint-disable-next-line not-rely-on-time
uint256 _deadline = block.timestamp + 1 days;
// @todo compute fee
uint256 _fee = 0;
uint256 _nonce = messageNonce;
require(msg.value >= _fee, "cannot pay fee");
uint256 _value;
unchecked {
_value = msg.value - _fee;
}
emit SentMessage(_to, msg.sender, _value, _fee, _deadline, _message, _nonce, _gasLimit);
unchecked {
messageNonce = _nonce + 1;
}
}
}

View File

@@ -12,7 +12,7 @@ import (
"sync/atomic"
"time"
"github.com/scroll-tech/go-ethereum"
geth "github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/math"
@@ -156,7 +156,7 @@ func (s *Sender) NumberOfAccounts() int {
func (s *Sender) getFeeData(auth *bind.TransactOpts, target *common.Address, value *big.Int, data []byte) (*FeeData, error) {
// estimate gas limit
gasLimit, err := s.client.EstimateGas(s.ctx, ethereum.CallMsg{From: auth.From, To: target, Value: value, Data: data})
gasLimit, err := s.client.EstimateGas(s.ctx, geth.CallMsg{From: auth.From, To: target, Value: value, Data: data})
if err != nil {
return nil, err
}

View File

@@ -22,7 +22,7 @@ import (
"scroll-tech/bridge/sender"
)
const TX_BATCH = 50
const TXBatch = 50
var (
privateKeys []*ecdsa.PrivateKey
@@ -84,7 +84,7 @@ func testBatchSender(t *testing.T, batchSize int) {
for idx := 0; idx < newSender.NumberOfAccounts(); idx++ {
index := idx
eg.Go(func() error {
for i := 0; i < TX_BATCH; i++ {
for i := 0; i < TXBatch; i++ {
toAddr := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
id := strconv.Itoa(i + index*1000)
_, err := newSender.SendTransaction(id, &toAddr, big.NewInt(1), nil)
@@ -103,7 +103,7 @@ func testBatchSender(t *testing.T, batchSize int) {
if err := eg.Wait(); err != nil {
t.Error(err)
}
t.Logf("successful send batch txs, batch size: %d, total count: %d", newSender.NumberOfAccounts(), TX_BATCH*newSender.NumberOfAccounts())
t.Logf("successful send batch txs, batch size: %d, total count: %d", newSender.NumberOfAccounts(), TXBatch*newSender.NumberOfAccounts())
// avoid the default 10-minute timeout causing the testcase to panic
after := time.After(80 * time.Second)

204
bridge/tests/bridge_test.go Normal file
View File

@@ -0,0 +1,204 @@
package tests
import (
"context"
"crypto/ecdsa"
"math/big"
"scroll-tech/common/docker"
"testing"
"scroll-tech/bridge/config"
"scroll-tech/bridge/mock_bridge"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/stretchr/testify/assert"
)
var (
// config
cfg *config.Config
// private key
privateKey *ecdsa.PrivateKey
// docker container handlers.
l1gethImg docker.ImgInstance
l2gethImg docker.ImgInstance
dbImg docker.ImgInstance
// clients
l1Client *ethclient.Client
l2Client *ethclient.Client
// auth
l1Auth *bind.TransactOpts
l2Auth *bind.TransactOpts
// l1 messenger contract
l1MessengerInstance *mock_bridge.MockBridgeL1
l1MessengerAddress common.Address
// l1 rollup contract
l1RollupInstance *mock_bridge.MockBridgeL1
l1RollupAddress common.Address
// l2 messenger contract
l2MessengerInstance *mock_bridge.MockBridgeL2
l2MessengerAddress common.Address
)
func setupEnv(t *testing.T) {
var err error
privateKey, err = crypto.ToECDSA(common.FromHex("1212121212121212121212121212121212121212121212121212121212121212"))
assert.NoError(t, err)
messagePrivateKey, err := crypto.ToECDSA(common.FromHex("1212121212121212121212121212121212121212121212121212121212121213"))
assert.NoError(t, err)
rollupPrivateKey, err := crypto.ToECDSA(common.FromHex("1212121212121212121212121212121212121212121212121212121212121214"))
assert.NoError(t, err)
// Load config.
cfg, err = config.NewConfig("../config.json")
assert.NoError(t, err)
cfg.L1Config.Confirmations = 0
cfg.L1Config.RelayerConfig.MessageSenderPrivateKeys = []*ecdsa.PrivateKey{messagePrivateKey}
cfg.L1Config.RelayerConfig.RollupSenderPrivateKeys = []*ecdsa.PrivateKey{rollupPrivateKey}
cfg.L2Config.Confirmations = 0
cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys = []*ecdsa.PrivateKey{messagePrivateKey}
cfg.L2Config.RelayerConfig.RollupSenderPrivateKeys = []*ecdsa.PrivateKey{rollupPrivateKey}
// Create l1geth container.
l1gethImg = docker.NewTestL1Docker(t)
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = l1gethImg.Endpoint()
cfg.L1Config.Endpoint = l1gethImg.Endpoint()
// Create l2geth container.
l2gethImg = docker.NewTestL2Docker(t)
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = l2gethImg.Endpoint()
cfg.L2Config.Endpoint = l2gethImg.Endpoint()
// Create db container.
dbImg = docker.NewTestDBDocker(t, cfg.DBConfig.DriverName)
cfg.DBConfig.DSN = dbImg.Endpoint()
// Create l1geth and l2geth client.
l1Client, err = ethclient.Dial(cfg.L1Config.Endpoint)
assert.NoError(t, err)
l2Client, err = ethclient.Dial(cfg.L2Config.Endpoint)
assert.NoError(t, err)
// Create l1 and l2 auth
l1Auth = prepareAuth(t, l1Client, privateKey)
l2Auth = prepareAuth(t, l2Client, privateKey)
// send some balance to message and rollup sender
transferEther(t, l1Auth, l1Client, messagePrivateKey)
transferEther(t, l1Auth, l1Client, rollupPrivateKey)
transferEther(t, l2Auth, l2Client, messagePrivateKey)
transferEther(t, l2Auth, l2Client, rollupPrivateKey)
}
func transferEther(t *testing.T, auth *bind.TransactOpts, client *ethclient.Client, privateKey *ecdsa.PrivateKey) {
targetAddress := crypto.PubkeyToAddress(privateKey.PublicKey)
gasPrice, err := client.SuggestGasPrice(context.Background())
assert.NoError(t, err)
gasPrice.Mul(gasPrice, big.NewInt(2))
// Get pending nonce
nonce, err := client.PendingNonceAt(context.Background(), auth.From)
assert.NoError(t, err)
// 200 ether should be enough
value, ok := big.NewInt(0).SetString("0xad78ebc5ac6200000", 0)
assert.Equal(t, ok, true)
tx := types.NewTx(&types.LegacyTx{
Nonce: nonce,
To: &targetAddress,
Value: value,
Gas: 500000,
GasPrice: gasPrice,
})
signedTx, err := auth.Signer(auth.From, tx)
assert.NoError(t, err)
err = client.SendTransaction(context.Background(), signedTx)
assert.NoError(t, err)
receipt, err := bind.WaitMined(context.Background(), client, signedTx)
assert.NoError(t, err)
if receipt.Status != types.ReceiptStatusSuccessful {
t.Fatalf("Call failed")
}
}
func free(t *testing.T) {
if dbImg != nil {
assert.NoError(t, dbImg.Stop())
}
if l1gethImg != nil {
assert.NoError(t, l1gethImg.Stop())
}
if l2gethImg != nil {
assert.NoError(t, l2gethImg.Stop())
}
}
func prepareContracts(t *testing.T) {
var err error
var tx *types.Transaction
// L1 messenger contract
_, tx, l1MessengerInstance, err = mock_bridge.DeployMockBridgeL1(l1Auth, l1Client)
assert.NoError(t, err)
l1MessengerAddress, err = bind.WaitDeployed(context.Background(), l1Client, tx)
assert.NoError(t, err)
// L1 rollup contract
_, tx, l1RollupInstance, err = mock_bridge.DeployMockBridgeL1(l1Auth, l1Client)
assert.NoError(t, err)
l1RollupAddress, err = bind.WaitDeployed(context.Background(), l1Client, tx)
assert.NoError(t, err)
// L2 messenger contract
_, tx, l2MessengerInstance, err = mock_bridge.DeployMockBridgeL2(l2Auth, l2Client)
assert.NoError(t, err)
l2MessengerAddress, err = bind.WaitDeployed(context.Background(), l2Client, tx)
assert.NoError(t, err)
cfg.L1Config.L1MessengerAddress = l1MessengerAddress
cfg.L1Config.RollupContractAddress = l1RollupAddress
cfg.L1Config.RelayerConfig.MessengerContractAddress = l2MessengerAddress
cfg.L2Config.L2MessengerAddress = l2MessengerAddress
cfg.L2Config.RelayerConfig.MessengerContractAddress = l1MessengerAddress
cfg.L2Config.RelayerConfig.RollupContractAddress = l1RollupAddress
}
func prepareAuth(t *testing.T, client *ethclient.Client, privateKey *ecdsa.PrivateKey) *bind.TransactOpts {
chainID, err := client.ChainID(context.Background())
assert.NoError(t, err)
auth, err := bind.NewKeyedTransactorWithChainID(privateKey, chainID)
assert.NoError(t, err)
auth.Value = big.NewInt(0) // in wei
assert.NoError(t, err)
return auth
}
func TestFunction(t *testing.T) {
setupEnv(t)
// l1 rollup and watch rollup events
t.Run("TestCommitBatchAndFinalizeBatch", testCommitBatchAndFinalizeBatch)
// l2 message
t.Run("testRelayL2MessageSucceed", testRelayL2MessageSucceed)
t.Cleanup(func() {
free(t)
})
}

View File

@@ -0,0 +1,175 @@
package tests
import (
"context"
"math/big"
"scroll-tech/database"
"scroll-tech/database/migrate"
"scroll-tech/database/orm"
"sync"
"testing"
"scroll-tech/bridge/l1"
"scroll-tech/bridge/l2"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert"
)
func testRelayL2MessageSucceed(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
var wg sync.WaitGroup
wg.Add(4)
prepareContracts(t)
// Create L2Relayer
l2Cfg := cfg.L2Config
l2Relayer, err := l2.NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer l2Relayer.Stop()
// Create L2Watcher
l2Watcher := l2.NewL2WatcherClient(context.Background(), l2Client, 0, l2Cfg.BatchProposerConfig, l2Cfg.L2MessengerAddress, db)
// Create L1Watcher
l1Cfg := cfg.L1Config
l1Watcher := l1.NewWatcher(context.Background(), l1Client, 0, 0, l1Cfg.L1MessengerAddress, l1Cfg.RollupContractAddress, db)
// send message through l2 messenger contract
nonce, err := l2MessengerInstance.MessageNonce(&bind.CallOpts{})
assert.NoError(t, err)
sendTx, err := l2MessengerInstance.SendMessage(l2Auth, l1Auth.From, big.NewInt(0), common.Hex2Bytes("00112233"), big.NewInt(0))
assert.NoError(t, err)
sendReceipt, err := bind.WaitMined(context.Background(), l2Client, sendTx)
assert.NoError(t, err)
if sendReceipt.Status != types.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}
// l2 watch process events
l2Watcher.FetchContractEvent(&wg, sendReceipt.BlockNumber.Uint64())
// check db status
msg, err := db.GetL2MessageByNonce(nonce.Uint64())
assert.NoError(t, err)
assert.Equal(t, msg.Status, orm.MsgPending)
assert.Equal(t, msg.Sender, l2Auth.From.String())
assert.Equal(t, msg.Target, l1Auth.From.String())
// add fake blocks
traces := []*types.BlockTrace{
{
Header: &types.Header{
Number: sendReceipt.BlockNumber,
ParentHash: common.Hash{},
Difficulty: big.NewInt(0),
BaseFee: big.NewInt(0),
},
StorageTrace: &types.StorageTrace{},
},
}
err = db.InsertBlockTraces(traces)
assert.NoError(t, err)
// add fake batch
dbTx, err := db.Beginx()
assert.NoError(t, err)
batchID, err := db.NewBatchInDBTx(dbTx,
&orm.BlockInfo{
Number: traces[0].Header.Number.Uint64(),
Hash: traces[0].Header.Hash().String(),
ParentHash: traces[0].Header.ParentHash.String(),
},
&orm.BlockInfo{
Number: traces[0].Header.Number.Uint64(),
Hash: traces[0].Header.Hash().String(),
ParentHash: traces[0].Header.ParentHash.String(),
},
traces[0].Header.ParentHash.String(), 1, 194676)
assert.NoError(t, err)
err = db.SetBatchIDForBlocksInDBTx(dbTx, []uint64{
traces[0].Header.Number.Uint64(),
traces[0].Header.Number.Uint64()}, batchID)
assert.NoError(t, err)
err = dbTx.Commit()
assert.NoError(t, err)
// add dummy proof
tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
err = db.UpdateProofByID(context.Background(), batchID, tProof, tInstanceCommitments, 100)
assert.NoError(t, err)
err = db.UpdateProvingStatus(batchID, orm.ProvingTaskVerified)
assert.NoError(t, err)
// process pending batch and check status
l2Relayer.ProcessPendingBatches(&wg)
status, err := db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupCommitting, status)
commitTxHash, err := db.GetCommitTxHash(batchID)
assert.NoError(t, err)
assert.Equal(t, true, commitTxHash.Valid)
commitTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(commitTxHash.String))
assert.NoError(t, err)
commitTxReceipt, err := bind.WaitMined(context.Background(), l1Client, commitTx)
assert.NoError(t, err)
assert.Equal(t, len(commitTxReceipt.Logs), 1)
// fetch CommitBatch rollup events
err = l1Watcher.FetchContractEvent(commitTxReceipt.BlockNumber.Uint64())
assert.NoError(t, err)
status, err = db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupCommitted, status)
// process committed batch and check status
l2Relayer.ProcessCommittedBatches(&wg)
status, err = db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupFinalizing, status)
finalizeTxHash, err := db.GetFinalizeTxHash(batchID)
assert.NoError(t, err)
assert.Equal(t, true, finalizeTxHash.Valid)
finalizeTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(finalizeTxHash.String))
assert.NoError(t, err)
finalizeTxReceipt, err := bind.WaitMined(context.Background(), l1Client, finalizeTx)
assert.NoError(t, err)
assert.Equal(t, len(finalizeTxReceipt.Logs), 1)
// fetch FinalizeBatch events
err = l1Watcher.FetchContractEvent(finalizeTxReceipt.BlockNumber.Uint64())
assert.NoError(t, err)
status, err = db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupFinalized, status)
// process l2 messages
l2Relayer.ProcessSavedEvents(&wg)
msg, err = db.GetL2MessageByNonce(nonce.Uint64())
assert.NoError(t, err)
assert.Equal(t, msg.Status, orm.MsgSubmitted)
relayTxHash, err := db.GetRelayL2MessageTxHash(nonce.Uint64())
assert.NoError(t, err)
assert.Equal(t, true, relayTxHash.Valid)
relayTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(relayTxHash.String))
assert.NoError(t, err)
relayTxReceipt, err := bind.WaitMined(context.Background(), l1Client, relayTx)
assert.NoError(t, err)
assert.Equal(t, len(relayTxReceipt.Logs), 1)
// fetch message relayed events
err = l1Watcher.FetchContractEvent(relayTxReceipt.BlockNumber.Uint64())
assert.NoError(t, err)
msg, err = db.GetL2MessageByNonce(nonce.Uint64())
assert.NoError(t, err)
assert.Equal(t, msg.Status, orm.MsgConfirmed)
}

138
bridge/tests/rollup_test.go Normal file
View File

@@ -0,0 +1,138 @@
package tests
import (
"context"
"math/big"
"scroll-tech/database"
"scroll-tech/database/migrate"
"scroll-tech/database/orm"
"sync"
"testing"
"scroll-tech/bridge/l1"
"scroll-tech/bridge/l2"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert"
)
func testCommitBatchAndFinalizeBatch(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
prepareContracts(t)
// Create L2Relayer
l2Cfg := cfg.L2Config
l2Relayer, err := l2.NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer l2Relayer.Stop()
// Create L1Watcher
l1Cfg := cfg.L1Config
l1Watcher := l1.NewWatcher(context.Background(), l1Client, 0, 0, l1Cfg.L1MessengerAddress, l1Cfg.RollupContractAddress, db)
// add some blocks to db
var traces []*types.BlockTrace
var parentHash common.Hash
for i := 1; i <= 10; i++ {
header := types.Header{
Number: big.NewInt(int64(i)),
ParentHash: parentHash,
Difficulty: big.NewInt(0),
BaseFee: big.NewInt(0),
}
traces = append(traces, &types.BlockTrace{
Header: &header,
StorageTrace: &types.StorageTrace{},
})
parentHash = header.Hash()
}
err = db.InsertBlockTraces(traces)
assert.NoError(t, err)
// add one batch to db
dbTx, err := db.Beginx()
assert.NoError(t, err)
batchID, err := db.NewBatchInDBTx(dbTx,
&orm.BlockInfo{
Number: traces[0].Header.Number.Uint64(),
Hash: traces[0].Header.Hash().String(),
ParentHash: traces[0].Header.ParentHash.String(),
},
&orm.BlockInfo{
Number: traces[1].Header.Number.Uint64(),
Hash: traces[1].Header.Hash().String(),
ParentHash: traces[1].Header.ParentHash.String(),
},
traces[0].Header.ParentHash.String(), 1, 194676) // parentHash & totalTxNum & totalL2Gas don't really matter here
assert.NoError(t, err)
err = db.SetBatchIDForBlocksInDBTx(dbTx, []uint64{
traces[0].Header.Number.Uint64(),
traces[1].Header.Number.Uint64()}, batchID)
assert.NoError(t, err)
err = dbTx.Commit()
assert.NoError(t, err)
var wg = sync.WaitGroup{}
wg.Add(1)
// process pending batch and check status
l2Relayer.ProcessPendingBatches(&wg)
wg.Wait()
status, err := db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupCommitting, status)
commitTxHash, err := db.GetCommitTxHash(batchID)
assert.NoError(t, err)
assert.Equal(t, true, commitTxHash.Valid)
commitTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(commitTxHash.String))
assert.NoError(t, err)
commitTxReceipt, err := bind.WaitMined(context.Background(), l1Client, commitTx)
assert.NoError(t, err)
assert.Equal(t, len(commitTxReceipt.Logs), 1)
// fetch rollup events
err = l1Watcher.FetchContractEvent(commitTxReceipt.BlockNumber.Uint64())
assert.NoError(t, err)
status, err = db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupCommitted, status)
// add dummy proof
tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
err = db.UpdateProofByID(context.Background(), batchID, tProof, tInstanceCommitments, 100)
assert.NoError(t, err)
err = db.UpdateProvingStatus(batchID, orm.ProvingTaskVerified)
assert.NoError(t, err)
wg.Add(1)
// process committed batch and check status
l2Relayer.ProcessCommittedBatches(&wg)
wg.Wait()
status, err = db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupFinalizing, status)
finalizeTxHash, err := db.GetFinalizeTxHash(batchID)
assert.NoError(t, err)
assert.Equal(t, true, finalizeTxHash.Valid)
finalizeTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(finalizeTxHash.String))
assert.NoError(t, err)
finalizeTxReceipt, err := bind.WaitMined(context.Background(), l1Client, finalizeTx)
assert.NoError(t, err)
assert.Equal(t, len(finalizeTxReceipt.Logs), 1)
// fetch rollup events
err = l1Watcher.FetchContractEvent(finalizeTxReceipt.BlockNumber.Uint64())
assert.NoError(t, err)
status, err = db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupFinalized, status)
}

View File

@@ -20,8 +20,8 @@ func encodePacked(input ...[]byte) []byte {
// ComputeMessageHash computes the message hash
func ComputeMessageHash(
target common.Address,
sender common.Address,
target common.Address,
value *big.Int,
fee *big.Int,
deadline *big.Int,
@@ -29,8 +29,8 @@ func ComputeMessageHash(
messageNonce *big.Int,
) common.Hash {
packed := encodePacked(
target.Bytes(),
sender.Bytes(),
target.Bytes(),
math.U256Bytes(value),
math.U256Bytes(fee),
math.U256Bytes(deadline),
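For context, a sketch of the full function after the sender/target swap; the hunk only shows the changed lines, so the trailing fields and the final hash call are assumptions inferred from the mocks' `abi.encodePacked(_from, _to, _value, _fee, _deadline, _nonce, _message)`:
package utils

import (
	"math/big"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/common/math"
	"github.com/scroll-tech/go-ethereum/crypto"
)

// encodePacked concatenates its inputs, mimicking Solidity's abi.encodePacked.
func encodePacked(input ...[]byte) []byte {
	var out []byte
	for _, b := range input {
		out = append(out, b...)
	}
	return out
}

// ComputeMessageHash computes the message hash; the field order follows the
// mock contracts. The Keccak256Hash call is assumed, not shown in the hunk.
func ComputeMessageHash(
	sender common.Address,
	target common.Address,
	value *big.Int,
	fee *big.Int,
	deadline *big.Int,
	message []byte,
	messageNonce *big.Int,
) common.Hash {
	packed := encodePacked(
		sender.Bytes(),
		target.Bytes(),
		math.U256Bytes(value),
		math.U256Bytes(fee),
		math.U256Bytes(deadline),
		math.U256Bytes(messageNonce),
		message,
	)
	return crypto.Keccak256Hash(packed)
}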

View File

@@ -7,6 +7,7 @@ import (
"scroll-tech/bridge/utils"
"github.com/scroll-tech/go-ethereum/common"
"github.com/stretchr/testify/assert"
)
func TestKeccak2(t *testing.T) {
@@ -28,15 +29,13 @@ func TestKeccak2(t *testing.T) {
func TestComputeMessageHash(t *testing.T) {
hash := utils.ComputeMessageHash(
common.HexToAddress("0xdafea492d9c6733ae3d56b7ed1adb60692c98bc5"),
common.HexToAddress("0xeafea492d9c6733ae3d56b7ed1adb60692c98bf7"),
big.NewInt(1),
big.NewInt(2),
big.NewInt(1234567),
common.Hex2Bytes("0011223344"),
big.NewInt(3),
common.HexToAddress("0xd7227113b92e537aeda220d5a2f201b836e5879d"),
common.HexToAddress("0x47c02b023b6787ef4e503df42bbb1a94f451a1c0"),
big.NewInt(5000000000000000),
big.NewInt(0),
big.NewInt(1674204924),
common.Hex2Bytes("8eaac8a30000000000000000000000007138b17fc82d7e954b3bd2f98d8166d03e5e569b0000000000000000000000007138b17fc82d7e954b3bd2f98d8166d03e5e569b0000000000000000000000000000000000000000000000000011c37937e0800000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000"),
big.NewInt(30706),
)
if hash != common.HexToHash("0x58c9a5abfd2a558bb6a6fd5192b36fe9325d98763bafd3a51a1ea28a5d0b990b") {
t.Fatalf("Invalid ComputeMessageHash, want %s, got %s", "0x58c9a5abfd2a558bb6a6fd5192b36fe9325d98763bafd3a51a1ea28a5d0b990b", hash.Hex())
}
assert.Equal(t, hash.String(), "0x920e59f62ca89a0f481d44961c55d299dd20c575693692d61fdf3ca579d8edf3")
}

View File

@@ -179,6 +179,13 @@ linters:
- depguard
- gocyclo
- unparam
- exportloopref
- sqlclosecheck
- rowserrcheck
- durationcheck
- bidichk
- typecheck
- unused
enable-all: false
disable:
@@ -200,16 +207,7 @@ issues:
# Exclude some linters from running on tests files.
- path: _test\.go
linters:
- gocyclo
- errcheck
- dupl
- gosec
# Exclude known linters from partially hard-vendored code,
# which is impossible to exclude via "nolint" comments.
- path: internal/hmac/
text: "weak cryptographic primitive"
linters:
- gosec
# Exclude some staticcheck messages
@@ -217,18 +215,6 @@ issues:
- staticcheck
text: "SA9003:"
- linters:
- golint
text: "package comment should be of the form"
- linters:
- golint
text: "don't use ALL_CAPS in Go names;"
- linters:
- golint
text: "don't use underscores in Go names;"
# Exclude lll issues for long lines with go:generate
- linters:
- lll

View File

@@ -8,6 +8,7 @@ COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./roller/go.* ./roller/
COPY ./tests/integration-test/go.* ./tests/integration-test/
RUN go mod download -x
# Build bridge

View File

@@ -1,5 +1,5 @@
assets/
docs/
integration-test/
l2geth/
rpc-gateway/
*target/*

View File

@@ -1,5 +1,5 @@
# Build libzkp dependency
FROM scrolltech/go-rust-builder:go-1.17-rust-nightly-2022-08-23 as chef
FROM scrolltech/go-rust-builder:go-1.18-rust-nightly-2022-08-23 as chef
WORKDIR app
FROM chef as planner
@@ -24,6 +24,7 @@ COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./roller/go.* ./roller/
COPY ./tests/integration-test/go.* ./tests/integration-test/
RUN go mod download -x

View File

@@ -1,6 +1,6 @@
assets/
contracts/
docs/
integration-test/
l2geth/
rpc-gateway/
*target/*

View File

@@ -8,6 +8,7 @@ COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./roller/go.* ./roller/
COPY ./tests/integration-test/go.* ./tests/integration-test/
RUN go mod download -x
# Build db_cli

View File

@@ -1,6 +1,6 @@
assets/
contracts/
docs/
integration-test/
l2geth/
rpc-gateway/
*target/*

View File

@@ -0,0 +1,14 @@
#!/bin/bash
set -uex
${GOROOT}/bin/bin/gocover-cobertura < coverage.bridge.txt > coverage.bridge.xml
${GOROOT}/bin/bin/gocover-cobertura < coverage.db.txt > coverage.db.xml
${GOROOT}/bin/bin/gocover-cobertura < coverage.common.txt > coverage.common.xml
${GOROOT}/bin/bin/gocover-cobertura < coverage.coordinator.txt > coverage.coordinator.xml
${GOROOT}/bin/bin/gocover-cobertura < coverage.integration.txt > coverage.integration.xml
npx cobertura-merge -o cobertura.xml \
package1=coverage.bridge.xml \
package2=coverage.db.xml \
package3=coverage.common.xml \
package4=coverage.coordinator.xml \
package5=coverage.integration.xml

View File

@@ -0,0 +1,63 @@
imagePrefix = 'scrolltech'
credentialDocker = 'dockerhub'
pipeline {
agent any
options {
timeout (20)
}
tools {
go 'go-1.18'
nodejs "nodejs"
}
environment {
GO111MODULE = 'on'
PATH="/home/ubuntu/.cargo/bin:$PATH"
// LOG_DOCKER = 'true'
}
stages {
stage('Tag') {
steps {
script {
TAGNAME = sh(returnStdout: true, script: 'git tag -l --points-at HEAD')
sh "echo ${TAGNAME}"
// ...
}
}
}
stage('Build') {
environment {
// Extract the username and password of our credentials into "DOCKER_CREDENTIALS_USR" and "DOCKER_CREDENTIALS_PSW".
// (NOTE 1: DOCKER_CREDENTIALS will be set to "your_username:your_password".)
// The new variables will always be YOUR_VARIABLE_NAME + _USR and _PSW.
// (NOTE 2: You can't print credentials in the pipeline for security reasons.)
DOCKER_CREDENTIALS = credentials('dockerhub')
}
steps {
withCredentials([usernamePassword(credentialsId: "${credentialDocker}", passwordVariable: 'dockerPassword', usernameVariable: 'dockerUser')]) {
// Use a scripted pipeline.
script {
stage('Push image') {
if (TAGNAME == ""){
return;
}
sh "docker login --username=${dockerUser} --password=${dockerPassword}"
sh "make -C bridge docker"
sh "make -C coordinator docker"
sh "docker tag scrolltech/bridge:latest scrolltech/bridge:${TAGNAME}"
sh "docker tag scrolltech/coordinator:latest scrolltech/coordinator:${TAGNAME}"
sh "docker push scrolltech/bridge:${TAGNAME}"
sh "docker push scrolltech/coordinator:${TAGNAME}"
}
}
}
}
}
}
post {
always {
cleanWs()
slackSend(message: "${JOB_BASE_NAME} ${GIT_COMMIT} #${TAGNAME} Tag build ${currentBuild.result}")
}
}
}

89
common/cmd/cmd.go Normal file
View File

@@ -0,0 +1,89 @@
package cmd
import (
"os"
"os/exec"
"strings"
"sync"
"testing"
cmap "github.com/orcaman/concurrent-map"
)
var verbose bool
func init() {
v := os.Getenv("LOG_DOCKER")
if v == "true" || v == "TRUE" {
verbose = true
}
}
type checkFunc func(buf string)
// Cmd struct
type Cmd struct {
*testing.T
name string
args []string
mu sync.Mutex
cmd *exec.Cmd
checkFuncs cmap.ConcurrentMap //map[string]checkFunc
//stdout bytes.Buffer
Err error
}
// NewCmd creates a new Cmd instance.
func NewCmd(t *testing.T, name string, args ...string) *Cmd {
return &Cmd{
T: t,
checkFuncs: cmap.New(),
name: name,
args: args,
}
}
// RegistFunc registers a check func
func (t *Cmd) RegistFunc(key string, check checkFunc) {
t.checkFuncs.Set(key, check)
}
// UnRegistFunc unregisters a check func
func (t *Cmd) UnRegistFunc(key string) {
t.checkFuncs.Pop(key)
}
func (t *Cmd) runCmd() {
cmd := exec.Command(t.args[0], t.args[1:]...) //nolint:gosec
cmd.Stdout = t
cmd.Stderr = t
_ = cmd.Run()
}
// RunCmd runs the command, in a goroutine when parallel is true.
func (t *Cmd) RunCmd(parallel bool) {
t.Log("cmd: ", t.args)
if parallel {
go t.runCmd()
} else {
t.runCmd()
}
}
func (t *Cmd) Write(data []byte) (int, error) {
out := string(data)
if verbose {
t.Logf("%s: %v", t.name, out)
} else if strings.Contains(out, "error") || strings.Contains(out, "warning") {
t.Logf("%s: %v", t.name, out)
}
go t.checkFuncs.IterCb(func(_ string, value interface{}) {
check := value.(checkFunc)
check(out)
})
return len(data), nil
}

114
common/cmd/cmd_app.go Normal file
View File

@@ -0,0 +1,114 @@
package cmd
import (
"fmt"
"os"
"os/exec"
"strings"
"time"
"github.com/docker/docker/pkg/reexec"
"github.com/stretchr/testify/assert"
)
// RunApp exec's the current binary using name as argv[0] which will trigger the
// reexec init function for that name (e.g. "geth-test" in cmd/geth/run_test.go)
func (t *Cmd) RunApp(waitResult func() bool) {
t.Log("cmd: ", append([]string{t.name}, t.args...))
cmd := &exec.Cmd{
Path: reexec.Self(),
Args: append([]string{t.name}, t.args...),
Stderr: t,
Stdout: t,
}
if waitResult != nil {
go func() {
_ = cmd.Run()
}()
waitResult()
} else {
_ = cmd.Run()
}
t.mu.Lock()
t.cmd = cmd
t.mu.Unlock()
}
// WaitExit waits until the process exits.
func (t *Cmd) WaitExit() {
// Wait until all the check funcs have finished or the test has failed.
for !(t.Failed() || t.checkFuncs.IsEmpty()) {
<-time.After(time.Millisecond * 500)
}
// Send interrupt signal.
t.mu.Lock()
_ = t.cmd.Process.Signal(os.Interrupt)
t.mu.Unlock()
}
// Interrupt sends an interrupt signal.
func (t *Cmd) Interrupt() {
t.mu.Lock()
t.Err = t.cmd.Process.Signal(os.Interrupt)
t.mu.Unlock()
}
// WaitResult returns true if the keyword appears in the output before the timeout.
func (t *Cmd) WaitResult(timeout time.Duration, keyword string) bool {
if keyword == "" {
return false
}
okCh := make(chan struct{}, 1)
t.RegistFunc(keyword, func(buf string) {
if strings.Contains(buf, keyword) {
select {
case okCh <- struct{}{}:
default:
return
}
}
})
defer t.UnRegistFunc(keyword)
select {
case <-okCh:
return true
case <-time.After(timeout):
assert.Fail(t, fmt.Sprintf("didn't get the desired result before timeout, keyword: %s", keyword))
}
return false
}
// ExpectWithTimeout waits for the keyword, optionally in parallel, until the timeout elapses.
func (t *Cmd) ExpectWithTimeout(parallel bool, timeout time.Duration, keyword string) {
if keyword == "" {
return
}
okCh := make(chan struct{}, 1)
t.RegistFunc(keyword, func(buf string) {
if strings.Contains(buf, keyword) {
select {
case okCh <- struct{}{}:
default:
return
}
}
})
waitResult := func() {
defer t.UnRegistFunc(keyword)
select {
case <-okCh:
return
case <-time.After(timeout):
assert.Fail(t, fmt.Sprintf("didn't get the desired result before timeout, keyword: %s", keyword))
}
}
if parallel {
go waitResult()
} else {
waitResult()
}
}

42
common/cmd/cmd_test.go Normal file
View File

@@ -0,0 +1,42 @@
package cmd_test
import (
"fmt"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"scroll-tech/common/cmd"
)
func TestCmd(t *testing.T) {
app := cmd.NewCmd(t, "curTime", "date", "+%Y-%m-%d")
tm := time.Now()
curTime := fmt.Sprintf("%d-%02d-%02d", tm.Year(), tm.Month(), tm.Day())
okCh := make(chan struct{}, 1)
app.RegistFunc(curTime, func(buf string) {
if strings.Contains(buf, curTime) {
select {
case okCh <- struct{}{}:
default:
return
}
}
})
defer app.UnRegistFunc(curTime)
// Run cmd.
app.RunCmd(true)
// Wait result.
select {
case <-okCh:
return
case <-time.After(time.Second):
assert.Fail(t, fmt.Sprintf("didn't get the desired result before timeout, keyword: %s", curTime))
}
}

View File

@@ -1,98 +0,0 @@
package docker
import (
"errors"
"os"
"os/exec"
"strings"
"sync"
"testing"
)
var verbose bool
func init() {
v := os.Getenv("LOG_DOCKER")
if v == "true" || v == "TRUE" {
verbose = true
}
}
type checkFunc func(buf string)
// Cmd struct
type Cmd struct {
*testing.T
checkFuncs sync.Map //map[string]checkFunc
//stdout bytes.Buffer
errMsg chan error
}
// NewCmd create Cmd instance.
func NewCmd(t *testing.T) *Cmd {
cmd := &Cmd{
T: t,
//stdout: bytes.Buffer{},
errMsg: make(chan error, 2),
}
// Handle panic.
cmd.RegistFunc("panic", func(buf string) {
if strings.Contains(buf, "panic") {
cmd.errMsg <- errors.New(buf)
}
})
return cmd
}
// RegistFunc register check func
func (t *Cmd) RegistFunc(key string, check checkFunc) {
t.checkFuncs.Store(key, check)
}
// UnRegistFunc unregister check func
func (t *Cmd) UnRegistFunc(key string) {
if _, ok := t.checkFuncs.Load(key); ok {
t.checkFuncs.Delete(key)
}
}
// RunCmd parallel running when parallel is true.
func (t *Cmd) RunCmd(args []string, parallel bool) {
t.Log("RunCmd cmd", args)
if parallel {
go t.runCmd(args)
} else {
t.runCmd(args)
}
}
// ErrMsg return error output channel
func (t *Cmd) ErrMsg() <-chan error {
return t.errMsg
}
func (t *Cmd) Write(data []byte) (int, error) {
out := string(data)
if verbose {
t.Logf(out)
} else if strings.Contains(out, "error") || strings.Contains(out, "warning") {
t.Logf(out)
}
go func(content string) {
t.checkFuncs.Range(func(key, value any) bool {
check := value.(checkFunc)
check(content)
return true
})
}(out)
return len(data), nil
}
func (t *Cmd) runCmd(args []string) {
cmd := exec.Command(args[0], args[1:]...) //nolint:gosec
cmd.Stdout = t
cmd.Stderr = t
_ = cmd.Run()
}

View File

@@ -8,6 +8,9 @@ import (
"time"
"github.com/docker/docker/api/types"
"scroll-tech/common/cmd"
"scroll-tech/common/utils"
)
// ImgDB the postgres image manager.
@@ -21,19 +24,20 @@ type ImgDB struct {
password string
running bool
*Cmd
cmd *cmd.Cmd
}
// NewImgDB return postgres db img instance.
func NewImgDB(t *testing.T, image, password, dbName string, port int) ImgInstance {
return &ImgDB{
img := &ImgDB{
image: image,
name: fmt.Sprintf("%s-%s_%d", image, dbName, port),
password: password,
dbName: dbName,
port: port,
Cmd: NewCmd(t),
}
img.cmd = cmd.NewCmd(t, img.name, img.prepare()...)
return img
}
// Start postgres db container.
@@ -42,7 +46,7 @@ func (i *ImgDB) Start() error {
if id != "" {
return fmt.Errorf("container already exist, name: %s", i.name)
}
i.Cmd.RunCmd(i.prepare(), true)
i.cmd.RunCmd(true)
i.running = i.isOk()
if !i.running {
_ = i.Stop()
@@ -59,14 +63,13 @@ func (i *ImgDB) Stop() error {
i.running = false
ctx := context.Background()
// check if container is running, stop the running container.
id := GetContainerID(i.name)
if id != "" {
timeout := time.Second * 3
if err := cli.ContainerStop(ctx, id, &timeout); err != nil {
return err
}
i.id = id
// stop the running container.
if i.id == "" {
i.id = GetContainerID(i.name)
}
timeout := time.Second * 3
if err := cli.ContainerStop(ctx, i.id, &timeout); err != nil {
return err
}
// remove the stopped container.
return cli.ContainerRemove(ctx, i.id, types.ContainerRemoveOptions{})
@@ -94,7 +97,7 @@ func (i *ImgDB) prepare() []string {
func (i *ImgDB) isOk() bool {
keyword := "database system is ready to accept connections"
okCh := make(chan struct{}, 1)
i.RegistFunc(keyword, func(buf string) {
i.cmd.RegistFunc(keyword, func(buf string) {
if strings.Contains(buf, keyword) {
select {
case okCh <- struct{}{}:
@@ -103,14 +106,16 @@ func (i *ImgDB) isOk() bool {
}
}
})
defer i.UnRegistFunc(keyword)
defer i.cmd.UnRegistFunc(keyword)
select {
case <-okCh:
time.Sleep(time.Millisecond * 1500)
i.id = GetContainerID(i.name)
utils.TryTimes(3, func() bool {
i.id = GetContainerID(i.name)
return i.id != ""
})
return i.id != ""
case <-time.NewTimer(time.Second * 10).C:
case <-time.After(time.Second * 20):
return false
}
}

View File

@@ -9,6 +9,9 @@ import (
"time"
"github.com/docker/docker/api/types"
"scroll-tech/common/cmd"
"scroll-tech/common/utils"
)
// ImgGeth the geth image manager include l1geth and l2geth.
@@ -23,20 +26,21 @@ type ImgGeth struct {
wsPort int
running bool
*Cmd
cmd *cmd.Cmd
}
// NewImgGeth return geth img instance.
func NewImgGeth(t *testing.T, image, volume, ipc string, hPort, wPort int) ImgInstance {
return &ImgGeth{
img := &ImgGeth{
image: image,
name: fmt.Sprintf("%s-%d", image, time.Now().Nanosecond()),
volume: volume,
ipcPath: ipc,
httpPort: hPort,
wsPort: wPort,
Cmd: NewCmd(t),
}
img.cmd = cmd.NewCmd(t, img.name, img.prepare()...)
return img
}
// Start run image and check if it is running healthily.
@@ -45,7 +49,7 @@ func (i *ImgGeth) Start() error {
if id != "" {
return fmt.Errorf("container already exist, name: %s", i.name)
}
i.Cmd.RunCmd(i.prepare(), true)
i.cmd.RunCmd(true)
i.running = i.isOk()
if !i.running {
_ = i.Stop()
@@ -72,7 +76,7 @@ func (i *ImgGeth) Endpoint() string {
func (i *ImgGeth) isOk() bool {
keyword := "WebSocket enabled"
okCh := make(chan struct{}, 1)
i.RegistFunc(keyword, func(buf string) {
i.cmd.RegistFunc(keyword, func(buf string) {
if strings.Contains(buf, keyword) {
select {
case okCh <- struct{}{}:
@@ -81,13 +85,16 @@ func (i *ImgGeth) isOk() bool {
}
}
})
defer i.UnRegistFunc(keyword)
defer i.cmd.UnRegistFunc(keyword)
select {
case <-okCh:
i.id = GetContainerID(i.name)
utils.TryTimes(3, func() bool {
i.id = GetContainerID(i.name)
return i.id != ""
})
return i.id != ""
case <-time.NewTimer(time.Second * 10).C:
case <-time.After(time.Second * 10):
return false
}
}

View File

@@ -10,12 +10,19 @@ import (
"github.com/stretchr/testify/assert"
)
func TestL1Geth(t *testing.T) {
func TestDocker(t *testing.T) {
t.Parallel()
t.Run("testL1Geth", testL1Geth)
t.Run("testL2Geth", testL2Geth)
t.Run("testDB", testDB)
}
func testL1Geth(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
img := NewImgGeth(t, "scroll_l1geth", "", "", 8535, 0)
assert.NoError(t, img.Start())
img := NewTestL1Docker(t)
defer img.Stop()
client, err := ethclient.Dial(img.Endpoint())
@@ -26,12 +33,11 @@ func TestL1Geth(t *testing.T) {
t.Logf("chainId: %s", chainID.String())
}
func TestL2Geth(t *testing.T) {
func testL2Geth(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
img := NewImgGeth(t, "scroll_l2geth", "", "", 8535, 0)
assert.NoError(t, img.Start())
img := NewTestL2Docker(t)
defer img.Stop()
client, err := ethclient.Dial(img.Endpoint())
@@ -42,7 +48,7 @@ func TestL2Geth(t *testing.T) {
t.Logf("chainId: %s", chainID.String())
}
func TestDB(t *testing.T) {
func testDB(t *testing.T) {
driverName := "postgres"
dbImg := NewTestDBDocker(t, driverName)
defer dbImg.Stop()

View File

@@ -36,7 +36,7 @@ func GetContainerID(name string) string {
Filters: filter,
})
if len(lst) > 0 {
return lst[0].Names[0]
return lst[0].ID
}
return ""
}

View File

@@ -1,11 +1,16 @@
package docker
import (
"context"
"crypto/rand"
"math/big"
"testing"
"github.com/jmoiron/sqlx"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/stretchr/testify/assert"
"scroll-tech/common/utils"
)
var (
@@ -19,6 +24,18 @@ func NewTestL1Docker(t *testing.T) ImgInstance {
id, _ := rand.Int(rand.Reader, big.NewInt(2000))
imgL1geth := NewImgGeth(t, "scroll_l1geth", "", "", 0, l1StartPort+int(id.Int64()))
assert.NoError(t, imgL1geth.Start())
// try up to 3 times to fetch the chainID until the node is ready.
utils.TryTimes(3, func() bool {
client, _ := ethclient.Dial(imgL1geth.Endpoint())
if client != nil {
if _, err := client.ChainID(context.Background()); err == nil {
return true
}
}
return false
})
return imgL1geth
}
@@ -27,6 +44,18 @@ func NewTestL2Docker(t *testing.T) ImgInstance {
id, _ := rand.Int(rand.Reader, big.NewInt(2000))
imgL2geth := NewImgGeth(t, "scroll_l2geth", "", "", 0, l2StartPort+int(id.Int64()))
assert.NoError(t, imgL2geth.Start())
// try up to 3 times to fetch the chainID until the node is ready.
utils.TryTimes(3, func() bool {
client, _ := ethclient.Dial(imgL2geth.Endpoint())
if client != nil {
if _, err := client.ChainID(context.Background()); err == nil {
return true
}
}
return false
})
return imgL2geth
}
@@ -35,5 +64,15 @@ func NewTestDBDocker(t *testing.T, driverName string) ImgInstance {
id, _ := rand.Int(rand.Reader, big.NewInt(2000))
imgDB := NewImgDB(t, driverName, "123456", "test_db", dbStartPort+int(id.Int64()))
assert.NoError(t, imgDB.Start())
// try 5 times until the db is ready.
utils.TryTimes(5, func() bool {
db, _ := sqlx.Open(driverName, imgDB.Endpoint())
if db != nil {
return db.Ping() == nil
}
return false
})
return imgDB
}

View File

@@ -3,15 +3,15 @@ module scroll-tech/common
go 1.18
require (
github.com/docker/docker v20.10.17+incompatible
github.com/docker/docker v20.10.21+incompatible
github.com/jmoiron/sqlx v1.3.5
github.com/lib/pq v1.10.6
github.com/mattn/go-colorable v0.1.8
github.com/mattn/go-isatty v0.0.14
github.com/scroll-tech/go-ethereum v1.10.14-0.20221125025612-4ea77a7577c6
github.com/orcaman/concurrent-map v1.0.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81
github.com/stretchr/testify v1.8.0
github.com/urfave/cli/v2 v2.10.2
golang.org/x/sync v0.1.0
)
require (
@@ -27,7 +27,7 @@ require (
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
github.com/edsrzf/mmap-go v1.0.0 // indirect
github.com/ethereum/go-ethereum v1.10.23 // indirect
github.com/ethereum/go-ethereum v1.10.26 // indirect
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 // indirect
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
@@ -67,7 +67,7 @@ require (
github.com/rogpeppe/go-internal v1.8.1 // indirect
github.com/rs/cors v1.7.0 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/scroll-tech/zktrie v0.3.0 // indirect
github.com/scroll-tech/zktrie v0.3.1 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/sirupsen/logrus v1.9.0 // indirect
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 // indirect
@@ -77,11 +77,12 @@ require (
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90 // indirect
golang.org/x/crypto v0.5.0 // indirect
golang.org/x/mod v0.7.0 // indirect
golang.org/x/net v0.2.0 // indirect
golang.org/x/sys v0.2.0 // indirect
golang.org/x/text v0.4.0 // indirect
golang.org/x/net v0.5.0 // indirect
golang.org/x/sync v0.1.0 // indirect
golang.org/x/sys v0.4.0 // indirect
golang.org/x/text v0.6.0 // indirect
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba // indirect
golang.org/x/tools v0.3.0 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect

View File

@@ -112,8 +112,8 @@ github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwu
github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.17+incompatible h1:JYCuMrWaVNophQTOrMMoSwudOVEfcegoZZrleKc1xwE=
github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.21+incompatible h1:UTLdBmHk3bEY+w8qeO5KttOhy6OmXWsl/FEet9Uswog=
github.com/docker/docker v20.10.21+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@@ -126,8 +126,8 @@ github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaB
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/ethereum/go-ethereum v1.10.13/go.mod h1:W3yfrFyL9C1pHcwY5hmRHVDaorTiQxhYBkKyu5mEDHw=
github.com/ethereum/go-ethereum v1.10.23 h1:Xk8XAT4/UuqcjMLIMF+7imjkg32kfVFKoeyQDaO2yWM=
github.com/ethereum/go-ethereum v1.10.23/go.mod h1:EYFyF19u3ezGLD4RqOkLq+ZCXzYbLoNDdZlMt7kyKFg=
github.com/ethereum/go-ethereum v1.10.26 h1:i/7d9RBBwiXCEuyduBQzJw/mKmnvzsN14jqBmytw72s=
github.com/ethereum/go-ethereum v1.10.26/go.mod h1:EYFyF19u3ezGLD4RqOkLq+ZCXzYbLoNDdZlMt7kyKFg=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
@@ -364,6 +364,8 @@ github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFSt
github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/orcaman/concurrent-map v1.0.0 h1:I/2A2XPCb4IuQWcQhBhSwGfiuybl/J0ev9HDbW65HOY=
github.com/orcaman/concurrent-map v1.0.0/go.mod h1:Lu3tH6HLW3feq74c2GC+jIMS/K2CFcDWnWD9XkenwhI=
github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE=
github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc=
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 h1:oYW+YCJ1pachXTQmzR3rNLYGGz4g/UgFcjb28p/viDM=
@@ -402,10 +404,11 @@ github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221125025612-4ea77a7577c6 h1:o0Gq2d8nus6x82apluA5RJwjlOca7LIlpAfxlyvQvxs=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221125025612-4ea77a7577c6/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/zktrie v0.3.0 h1:c0GRNELUyAtyuiwllQKT1XjNbs7NRGfguKouiyLfFNY=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81 h1:Gm18RZ9WTR2Dupumr60E2m1Noe+l9/lITt6iRyxxZoc=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/zktrie v0.3.0/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
github.com/scroll-tech/zktrie v0.3.1 h1:HlR+fMBdjXX1/7cUMqpUgGEhGy/3vN1JpwQ0ovg/Ys8=
github.com/scroll-tech/zktrie v0.3.1/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
@@ -479,8 +482,8 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90 h1:Y/gsMcFOcR+6S6f3YeMKl5g+dZMEWqcz5Czj/GWYbkM=
golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -538,8 +541,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.2.0 h1:sZfSu1wtKLGlWI4ZZayP0ck9Y73K1ynO6gqzTdBVdPU=
golang.org/x/net v0.2.0/go.mod h1:KqCZLdyyvdV855qA2rE3GC2aiw5xGR5TEjj8smXukLY=
golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw=
golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -603,8 +606,8 @@ golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -616,8 +619,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=

View File

@@ -1380,7 +1380,7 @@ dependencies = [
[[package]]
name = "halo2-mpt-circuits"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/mpt-circuit.git?branch=scroll-dev-0902#b0ffab97316f9cf4b9e65aba398a047a8d6424a1"
source = "git+https://github.com/scroll-tech/mpt-circuit.git?branch=scroll-dev-0920-fix#d6bd0f291d41c4585e783d2d94a77fd80e1ba47e"
dependencies = [
"bitvec 0.22.3",
"ff 0.11.1",
@@ -1451,7 +1451,7 @@ dependencies = [
[[package]]
name = "halo2_proofs"
version = "0.2.0"
source = "git+https://github.com/scroll-tech/halo2.git?branch=scroll-dev-0902#6f18f38e82d302cd8b6ce8809b59c32350b019a3"
source = "git+https://github.com/scroll-tech/halo2.git?branch=scroll-dev-0902#6b8c8a07da9fbf5d5878831e35360b8c5e6d89a3"
dependencies = [
"blake2b_simd",
"cfg-if 0.1.10",
@@ -2397,7 +2397,7 @@ dependencies = [
[[package]]
name = "poseidon"
version = "0.2.0"
source = "git+https://github.com/appliedzkp/poseidon.git#5d29df01a95e3df6334080d28e983407f56b5da3"
source = "git+https://github.com/appliedzkp/poseidon.git#0b9965fbcd9e03559088b8f68489592286bc55e0"
dependencies = [
"group",
"halo2curves 0.2.1 (git+https://github.com/privacy-scaling-explorations/halo2curves?tag=0.3.0)",
@@ -2407,7 +2407,7 @@ dependencies = [
[[package]]
name = "poseidon"
version = "0.2.0"
source = "git+https://github.com/privacy-scaling-explorations/poseidon.git#5d29df01a95e3df6334080d28e983407f56b5da3"
source = "git+https://github.com/privacy-scaling-explorations/poseidon.git#0b9965fbcd9e03559088b8f68489592286bc55e0"
dependencies = [
"group",
"halo2curves 0.2.1 (git+https://github.com/privacy-scaling-explorations/halo2curves?tag=0.3.0)",
@@ -3389,7 +3389,7 @@ checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987"
[[package]]
name = "types"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/common-rs#4a299c70835179be7fcf007ebb122b428d063c56"
source = "git+https://github.com/scroll-tech/scroll-zkevm?branch=fix/mpt_limit#cfe8b4e959d6e09b3a45b58e45589ae62988a729"
dependencies = [
"base64 0.13.0",
"blake2",
@@ -3816,7 +3816,7 @@ checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f"
[[package]]
name = "zkevm"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/common-rs#4a299c70835179be7fcf007ebb122b428d063c56"
source = "git+https://github.com/scroll-tech/scroll-zkevm?branch=fix/mpt_limit#cfe8b4e959d6e09b3a45b58e45589ae62988a729"
dependencies = [
"anyhow",
"blake2",


@@ -8,8 +8,8 @@ edition = "2021"
crate-type = ["staticlib"]
[dependencies]
zkevm = { git = "https://github.com/scroll-tech/common-rs" }
types = { git = "https://github.com/scroll-tech/common-rs" }
zkevm = { git = "https://github.com/scroll-tech/scroll-zkevm", branch="fix/mpt_limit" }
types = { git = "https://github.com/scroll-tech/scroll-zkevm", branch="fix/mpt_limit" }
log = "0.4"
env_logger = "0.9.0"


@@ -4,12 +4,12 @@ import (
"crypto/ecdsa"
"crypto/rand"
"encoding/hex"
"encoding/json"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/rlp"
)
// RespStatus represents status code from roller to scroll
@@ -36,12 +36,12 @@ type AuthMsg struct {
type Identity struct {
// Roller name
Name string `json:"name"`
// Time of message creation
Timestamp int64 `json:"timestamp"`
// Unverified Unix timestamp of message creation
Timestamp uint32 `json:"timestamp"`
// Roller public key
PublicKey string `json:"publicKey"`
// Version is common.Version+ZK_VERSION. Use the following to check the latest ZK_VERSION version.
// curl -sL https://api.github.com/repos/scroll-tech/common-rs/commits | jq -r ".[0].sha"
// Version is common.Version+ZkVersion. Use the following to check the latest ZkVersion:
// curl -sL https://api.github.com/repos/scroll-tech/scroll-zkevm/commits | jq -r ".[0].sha"
Version string `json:"version"`
// Random unique token generated by manager
Token string `json:"token"`
@@ -115,12 +115,11 @@ func (a *AuthMsg) PublicKey() (string, error) {
// Hash returns the hash of the auth message, which should be the message used
// to construct the Signature.
func (i *Identity) Hash() ([]byte, error) {
bs, err := json.Marshal(i)
byt, err := rlp.EncodeToBytes(i)
if err != nil {
return nil, err
}
hash := crypto.Keccak256Hash(bs)
hash := crypto.Keccak256Hash(byt)
return hash[:], nil
}
@@ -204,12 +203,12 @@ type ProofDetail struct {
// Hash return proofMsg content hash.
func (z *ProofDetail) Hash() ([]byte, error) {
bs, err := json.Marshal(z)
byt, err := rlp.EncodeToBytes(z)
if err != nil {
return nil, err
}
hash := crypto.Keccak256Hash(bs)
hash := crypto.Keccak256Hash(byt)
return hash[:], nil
}
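The switch from json.Marshal to rlp.EncodeToBytes above is also why Timestamp moved from int64 to uint32: go-ethereum's RLP encoder only accepts unsigned integers. Below is a minimal, self-contained sketch of the new hashing scheme — note it uses upstream go-ethereum rather than the scroll-tech fork, and an illustrative struct standing in for the real Identity:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/rlp"
)

// identity mirrors the shape of the coordinator's Identity type;
// this is a stand-in for illustration only.
type identity struct {
	Name      string
	Timestamp uint32
	PublicKey string
	Version   string
	Token     string
}

func main() {
	i := identity{Name: "testRoller", Timestamp: 1674000000}
	byt, err := rlp.EncodeToBytes(&i) // RLP rejects signed ints, hence uint32
	if err != nil {
		panic(err)
	}
	hash := crypto.Keccak256Hash(byt)
	fmt.Printf("auth message hash: %x\n", hash[:])
}
```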


@@ -4,6 +4,7 @@ import (
"testing"
"time"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/stretchr/testify/assert"
)
@@ -15,7 +16,7 @@ func TestAuthMessageSignAndVerify(t *testing.T) {
authMsg := &AuthMsg{
Identity: &Identity{
Name: "testRoller",
Timestamp: time.Now().UnixNano(),
Timestamp: uint32(time.Now().Unix()),
},
}
assert.NoError(t, authMsg.Sign(privkey))
@@ -23,4 +24,10 @@ func TestAuthMessageSignAndVerify(t *testing.T) {
ok, err := authMsg.Verify()
assert.NoError(t, err)
assert.Equal(t, true, ok)
// Check public key is ok.
pub, err := authMsg.PublicKey()
assert.NoError(t, err)
pubkey := crypto.CompressPubkey(&privkey.PublicKey)
assert.Equal(t, pub, common.Bytes2Hex(pubkey))
}
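For context, the public-key check added to this test boils down to ECDSA recovery over the identity hash. A rough sketch of that round-trip — the real AuthMsg.Sign/Verify presumably wrap calls like these; upstream go-ethereum is used here:

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	key, err := crypto.GenerateKey()
	if err != nil {
		panic(err)
	}
	// Stand-in for Identity.Hash(); crypto.Sign requires a 32-byte digest.
	hash := crypto.Keccak256([]byte("rlp-encoded identity"))
	sig, err := crypto.Sign(hash, key)
	if err != nil {
		panic(err)
	}
	// Recover the signer and compress, as the test's assertion does.
	pub, err := crypto.SigToPub(hash, sig)
	if err != nil {
		panic(err)
	}
	fmt.Println(common.Bytes2Hex(crypto.CompressPubkey(pub)))
}
```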


@@ -1,12 +0,0 @@
package utils
import "os"
// GetEnvWithDefault get value from env if is none use the default
func GetEnvWithDefault(key string, defult string) string {
val := os.Getenv(key)
if len(val) == 0 {
val = defult
}
return val
}


@@ -1,6 +1,8 @@
package utils
import "github.com/urfave/cli/v2"
import (
"github.com/urfave/cli/v2"
)
var (
// CommonFlags is used for app common flags in different modules

common/utils/rpc_test.go (new file, 77 lines)

@@ -0,0 +1,77 @@
package utils
import (
"context"
"testing"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/stretchr/testify/assert"
)
type testService struct{}
type echoArgs struct {
S string
}
type echoResult struct {
Name string
ID int
Args *echoArgs
}
func (s *testService) NoArgsRets() {}
func (s *testService) Echo(str string, i int, args *echoArgs) echoResult {
return echoResult{str, i, args}
}
func TestStartHTTPEndpoint(t *testing.T) {
endpoint := "localhost:18080"
handler, _, err := StartHTTPEndpoint(endpoint, []rpc.API{
{
Public: true,
Namespace: "test",
Service: new(testService),
},
})
assert.NoError(t, err)
defer handler.Shutdown(context.Background())
client, err := rpc.Dial("http://" + endpoint)
assert.NoError(t, err)
assert.NoError(t, client.Call(nil, "test_noArgsRets"))
result := echoResult{}
assert.NoError(t, client.Call(&result, "test_echo", "test", 0, &echoArgs{S: "test"}))
assert.Equal(t, 0, result.ID)
assert.Equal(t, "test", result.Name)
defer client.Close()
}
func TestStartWSEndpoint(t *testing.T) {
endpoint := "localhost:18081"
handler, _, err := StartWSEndpoint(endpoint, []rpc.API{
{
Public: true,
Namespace: "test",
Service: new(testService),
},
})
assert.NoError(t, err)
defer handler.Shutdown(context.Background())
client, err := rpc.Dial("ws://" + endpoint)
assert.NoError(t, err)
assert.NoError(t, client.Call(nil, "test_noArgsRets"))
result := echoResult{}
assert.NoError(t, client.Call(&result, "test_echo", "test", 0, &echoArgs{S: "test"}))
assert.Equal(t, 0, result.ID)
assert.Equal(t, "test", result.Name)
defer client.Close()
}
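These tests assume StartHTTPEndpoint/StartWSEndpoint register the given APIs and return a shut-down-able server plus its address. A plausible shape for the HTTP variant — this is an assumption, not the actual helper (which presumably lives in common/utils and is not shown in this diff):

```go
package utils

import (
	"net"
	"net/http"

	"github.com/ethereum/go-ethereum/rpc"
)

// startHTTP is a sketch only: rpc.Server implements http.Handler,
// so it can be served over a plain HTTP listener directly.
func startHTTP(endpoint string, apis []rpc.API) (*http.Server, net.Addr, error) {
	srv := rpc.NewServer()
	for _, api := range apis {
		if err := srv.RegisterName(api.Namespace, api.Service); err != nil {
			return nil, nil, err
		}
	}
	ln, err := net.Listen("tcp", endpoint)
	if err != nil {
		return nil, nil, err
	}
	h := &http.Server{Handler: srv}
	go h.Serve(ln) //nolint:errcheck
	return h, ln.Addr(), nil
}
```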


@@ -0,0 +1,22 @@
package utils
import (
"fmt"
"os"
"github.com/docker/docker/pkg/reexec"
"github.com/urfave/cli/v2"
)
// RegisterSimulation registers an initializer function for the integration test.
func RegisterSimulation(app *cli.App, name string) {
// Run the app for integration-test
reexec.Register(name, func() {
if err := app.Run(os.Args); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
os.Exit(0)
})
reexec.Init()
}
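RegisterSimulation relies on docker's reexec trick: the test binary re-executes itself, and when os.Args[0] matches a registered name, the registered function runs instead of the tests. A hypothetical standalone sketch of both sides:

```go
package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/reexec"
)

func init() {
	// Mirrors RegisterSimulation: the name keys the re-exec dispatch.
	reexec.Register("coordinator-test", func() {
		fmt.Println("running as coordinator-test")
		os.Exit(0)
	})
	if reexec.Init() {
		os.Exit(0)
	}
}

func main() {
	// Parent side: spawn a child that re-executes this same binary
	// under the registered name.
	cmd := reexec.Command("coordinator-test")
	out, err := cmd.CombinedOutput()
	if err != nil {
		panic(err)
	}
	fmt.Printf("child said: %s", out)
}
```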


@@ -1,30 +1,10 @@
package utils
import (
"sync/atomic"
"github.com/scroll-tech/go-ethereum/core/types"
"golang.org/x/sync/errgroup"
)
// ComputeTraceGasCost computes the gas cost of a block trace.
func ComputeTraceGasCost(trace *types.BlockTrace) uint64 {
var (
gasCost uint64
eg errgroup.Group
)
for idx := range trace.ExecutionResults {
i := idx
eg.Go(func() error {
var sum uint64
for _, log := range trace.ExecutionResults[i].StructLogs {
sum += log.GasCost
}
atomic.AddUint64(&gasCost, sum)
return nil
})
}
_ = eg.Wait()
return gasCost
return trace.Header.GasUsed
}


@@ -19,13 +19,7 @@ func TestComputeTraceCost(t *testing.T) {
blockTrace := &types.BlockTrace{}
err = json.Unmarshal(templateBlockTrace, blockTrace)
assert.NoError(t, err)
var sum uint64
for _, v := range blockTrace.ExecutionResults {
for _, sv := range v.StructLogs {
sum += sv.GasCost
}
}
res := utils.ComputeTraceGasCost(blockTrace)
assert.Equal(t, sum, res)
var expected = blockTrace.Header.GasUsed
got := utils.ComputeTraceGasCost(blockTrace)
assert.Equal(t, expected, got)
}

common/utils/utils.go (new file, 13 lines)

@@ -0,0 +1,13 @@
package utils
import "time"
// TryTimes runs the given function up to `times` times, returning as soon as it reports true.
func TryTimes(times int, run func() bool) {
for i := 0; i < times; i++ {
if run() {
return
}
time.Sleep(time.Millisecond * 500)
}
}
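TryTimes swallows the outcome — it returns nothing — so it suits best-effort waits rather than anything the caller must verify. Illustrative usage, polling a local endpoint with the fixed 500ms back-off:

```go
package main

import (
	"net"

	"scroll-tech/common/utils"
)

func main() {
	// Retry up to 10 times; TryTimes returns after the first success
	// or after exhausting all attempts, with no error either way.
	utils.TryTimes(10, func() bool {
		conn, err := net.Dial("tcp", "localhost:18080")
		if err != nil {
			return false
		}
		_ = conn.Close()
		return true
	})
}
```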


@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "prealpha-v7.2"
var tag = "prealpha-v11.7"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {
@@ -22,5 +22,8 @@ var commit = func() string {
return ""
}()
// ZkVersion is the commit id of the scroll-zkevm dependency pinned in common/libzkp/impl/Cargo.lock, injected at link time.
var ZkVersion string
// Version denotes the version of the scroll protocol, including the l2geth, relayer, coordinator, roller, contracts, etc.
var Version = fmt.Sprintf("%s-%s", tag, commit)
var Version = fmt.Sprintf("%s-%s-%s", tag, commit, ZkVersion)
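ZkVersion is a plain string var precisely so the linker can set it; the coordinator Makefile further down in this diff does exactly that with -X. A small sketch of what a built binary would report (the commit values are placeholders):

```go
package main

import (
	"fmt"

	"scroll-tech/common/version"
)

// Built with (as in the coordinator Makefile):
//   go build -ldflags "-X scroll-tech/common/version.ZkVersion=cfe8b4e" ./cmd
// Version then reads e.g. "prealpha-v11.7-<git-commit>-cfe8b4e".
func main() {
	fmt.Println(version.Version)
}
```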


@@ -232,6 +232,50 @@ function initialize(uint256 _chainId) external nonpayable
|---|---|---|
| _chainId | uint256 | undefined |
### isBlockFinalized
```solidity
function isBlockFinalized(bytes32 _blockHash) external view returns (bool)
```
Return whether the block is finalized by block hash.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _blockHash | bytes32 | undefined |
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | bool | undefined |
### isBlockFinalized
```solidity
function isBlockFinalized(uint256 _blockHeight) external view returns (bool)
```
Return whether the block is finalized by block height.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _blockHeight | uint256 | undefined |
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | bool | undefined |
### lastFinalizedBatchID
```solidity


@@ -97,7 +97,7 @@ contract L1ScrollMessenger is OwnableUpgradeable, PausableUpgradeable, ScrollMes
require(!isMessageExecuted[_msghash], "Message successfully executed");
// @todo check proof
require(IZKRollup(rollup).verifyMessageStateProof(_proof.batchIndex, _proof.blockHeight), "invalid state proof");
require(IZKRollup(rollup).isBlockFinalized(_proof.blockHeight), "invalid state proof");
require(ZkTrieVerifier.verifyMerkleProof(_proof.merkleProof), "invalid proof");
// @todo check `_to` address to avoid attack.


@@ -59,6 +59,14 @@ interface IZKRollup {
/**************************************** View Functions ****************************************/
/// @notice Return whether the block is finalized by block hash.
/// @param blockHash The hash of the block to query.
function isBlockFinalized(bytes32 blockHash) external view returns (bool);
/// @notice Return whether the block is finalized by block height.
/// @param blockHeight The height of the block to query.
function isBlockFinalized(uint256 blockHeight) external view returns (bool);
/// @notice Return the message hash by index.
/// @param _index The index to query.
function getMessageHashByIndex(uint256 _index) external view returns (bytes32);
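Since isBlockFinalized is now overloaded, the two variants have distinct 4-byte selectors — relevant to anyone calling the rollup contract without generated bindings. A quick check in Go (upstream go-ethereum assumed):

```go
package main

import (
	"fmt"

	"github.com/ethereum/go-ethereum/crypto"
)

func main() {
	// A function selector is the first 4 bytes of keccak256 of the
	// canonical signature, so each overload resolves differently.
	for _, sig := range []string{
		"isBlockFinalized(bytes32)",
		"isBlockFinalized(uint256)",
	} {
		fmt.Printf("%-28s -> 0x%x\n", sig, crypto.Keccak256([]byte(sig))[:4])
	}
}
```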


@@ -89,6 +89,24 @@ contract ZKRollup is OwnableUpgradeable, IZKRollup {
/**************************************** View Functions ****************************************/
/// @inheritdoc IZKRollup
function isBlockFinalized(bytes32 _blockHash) external view returns (bool) {
// block not committed
if (blocks[_blockHash].transactionRoot == bytes32(0)) return false;
uint256 _batchIndex = blocks[_blockHash].batchIndex;
bytes32 _batchId = finalizedBatches[_batchIndex];
return _batchId != bytes32(0);
}
/// @inheritdoc IZKRollup
function isBlockFinalized(uint256 _blockHeight) external view returns (bool) {
bytes32 _batchID = lastFinalizedBatchID;
bytes32 _batchHash = batches[_batchID].batchHash;
uint256 _maxHeight = blocks[_batchHash].blockHeight;
return _blockHeight <= _maxHeight;
}
/// @inheritdoc IZKRollup
function getMessageHashByIndex(uint256 _index) external view returns (bytes32) {
return messageQueue[_index];


@@ -386,7 +386,7 @@ library RollupVerifier {
t1
)
);
update_hash_scalar(7326291674247555594112707886804937707847188185923070866278273345303869756280, absorbing, 0);
update_hash_scalar(18620528901694425296072105892920066495478887717015933899493919566746585676047, absorbing, 0);
update_hash_point(m[0], m[1], absorbing, 2);
for (t0 = 0; t0 <= 4; t0++) {
update_hash_point(proof[0 + t0 * 2], proof[1 + t0 * 2], absorbing, 5 + t0 * 3);
@@ -724,8 +724,8 @@ library RollupVerifier {
(t0, t1) = (ecc_mul_add_pm(m, proof, 1461486238301980199876269201563775120819706402602, t0, t1));
(t0, t1) = (
ecc_mul_add(
18701609130775737229348071043080155034023979562517390395403433088802478899758,
15966955543930185772599298905781740007968379271659670990460125132276790404701,
5335172776193682293002595672140655300498265857728161236987288793112411362256,
9153855726472286104461915396077745889260526805920405949557461469033032628222,
m[78],
t0,
t1
@@ -733,8 +733,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
10391672869328159104536012527288890078475214572275421477472198141744100604180,
16383182967525077486800851500412772270268328143041811261940514978333847876450,
9026202793013131831701482540600751978141377016764300618037152689098701087208,
19644677619301694001087044142922327551116787792977369058786364247421954485859,
m[77],
t0,
t1
@@ -742,8 +742,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
1694121668121560366967381814358868176695875056710903754887787227675156636991,
6288755472313871386012926867179622380057563139110460659328016508371672965822,
10826234859452509771814283128042282248838241144105945602706734900173561719624,
5628243352113405764051108388315822074832358861640908064883601198703833923438,
m[76],
t0,
t1
@@ -751,8 +751,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
8449090587209846475328734419746789925412190193479844231777165308243174237722,
19620423218491500875965944829407986067794157844846402182805878618955604592848,
9833916648960859819503777242562918959056952519298917148524233826817297321072,
837915750759756490172805968793319594111899487492554675680829218939384285955,
m[75],
t0,
t1
@@ -760,8 +760,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
5053208336959682582031156680199539869251745263409434673229644546747696847142,
2515271708296970065769200367712058290268116287798438948140802173656220671206,
10257805671474982710489846158410183388099935223468876792311814484878286190506,
6925081619093494730602614238209964215162532591387952125009817011864359314464,
m[74],
t0,
t1
@@ -769,8 +769,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
14044565934581841113280816557133159251170886931106151374890478449607604267942,
4516676687937794780030405510740994119381246893674971835541700695978704585552,
4475887208248126488081900175351981014160135345959097965081514547035591501401,
17809934801579097157548855239127693133451078551727048660674021788322026074440,
m[73],
t0,
t1
@@ -823,8 +823,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
4919836553908828082540426444868776555669883964231731088484431671272015675682,
2534996469663628472218664436969797350677809756735321673130157881813913441609,
18539453526841971932568089122596968064597086391356856358866942118522457107863,
3647865108410881496134024808028560930237661296032155096209994441023206530212,
m[67],
t0,
t1
@@ -841,8 +841,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
7298741378311576950839968993357330108079245118485170808123459961337830256312,
10327561179499117619949936626306234488421661318541529469701192193684736307992,
19267653195273486172950176174654469275684545789180737280515385961619717720594,
8975180971271331994178632284567744253406636398050840906044549681238954521839,
m[65],
t0,
t1
@@ -859,8 +859,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
21344975294019301064497004820288763682448968861642019035490416932201272957274,
10527619823264344893410550194287064640208153251186939130321425213582959780489,
14883199025754033315476980677331963148201112555947054150371482532558947065890,
19913319410736467436640597337700981504577668548125107926660028143291852201132,
m[63],
t0,
t1
@@ -868,8 +868,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
8972742415650205333409282370033440562593431348747288268814492203356823531160,
8116706321112691122771049432546166822575953322170688547310064134261753771143,
18290509522533712126835038141804610778392690327965261406132668236833728306838,
3710298066677974093924183129147170087104393961634393354172472701713090868425,
m[62],
t0,
t1
@@ -877,8 +877,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
2245383788954722547301665173770198299224442299145553661157120655982065376923,
21429627532145565836455474503387893562363999035988060101286707048187310790834,
19363467492491917195052183458025198134822377737629876295496853723068679518308,
5854330679906778271391785925618350923591828884998994352880284635518306250788,
m[61],
t0,
t1
@@ -886,8 +886,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
6631831869726773976361406817204839637256208337970281843457872807848960103655,
9564029493986604546558813596663080644256762699468834511701525072767927949801,
16181109780595136982201896766265118193466959602989950846464420951358063185297,
8811570609098296287610981932552574275858846837699446990256241563674576678567,
m[60],
t0,
t1
@@ -895,8 +895,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
11480433023546787855799302686493624232665854025790899812568432142639901048711,
19408335616099148180409133533838326787843523379558500985213116784449716389602,
10062549363619405779400496848029040530770586215674909583260224093983878118724,
1989582851705118987083736605676322092152792129388805756040224163519806904905,
m[59],
t0,
t1
@@ -904,8 +904,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
17119009547436104907589161251911916154539209413889810725547125453954285498068,
16196009614025712805558792610177918739658373559330006740051047693948800191562,
19016883348721334939078393300672242978266248861945205052660538073783955572863,
1976040209279107904310062622264754919366006151976304093568644070161390236037,
m[58],
t0,
t1
@@ -922,8 +922,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
18650010323993268535055713787599480879302828622769515272251129462854128226895,
11244246887388549559894193327128701737108444364011850111062992666532968469107,
10610438624239085062445835373411523076517149007370367578847561825933262473434,
14538778619212692682166219259545768162136692909816914624880992580957990166795,
m[56],
t0,
t1


@@ -4,22 +4,26 @@ IMAGE_NAME=coordinator
IMAGE_VERSION=latest
REPO_ROOT_DIR=./..
ifeq (4.3,$(firstword $(sort $(MAKE_VERSION) 4.3)))
ZK_VERSION=$(shell grep -m 1 "scroll-zkevm" ../common/libzkp/impl/Cargo.lock | cut -d "#" -f2 | cut -c-7)
else
ZK_VERSION=$(shell grep -m 1 "scroll-zkevm" ../common/libzkp/impl/Cargo.lock | cut -d "\#" -f2 | cut -c-7)
endif
test:
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 $(PWD)/...
libzkp:
cd ../common/libzkp/impl && cargo build --release && cp ./target/release/libzkp.a ../interface/
cp -r ../common/libzkp/interface ./verifier/lib
rm -rf ./verifier/lib && cp -r ../common/libzkp/interface ./verifier/lib
coordinator: ## Builds the Coordinator instance.
cd ../common/libzkp/impl && cargo build --release && cp ./target/release/libzkp.a ../interface/
cp -r ../common/libzkp/interface ./verifier/lib
go build -o $(PWD)/build/bin/coordinator ./cmd
coordinator: libzkp ## Builds the Coordinator instance.
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator ./cmd
test-verifier:
test-verifier: libzkp
go test -tags ffi -timeout 0 -v ./verifier
test-gpu-verifier:
test-gpu-verifier: libzkp
go test -tags="gpu ffi" -timeout 0 -v ./verifier
lint: ## Lint the files - used for CI
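The Make-version conditional exists because GNU Make 4.3 changed how '#' is handled inside function invocations (pre-4.3 needs the backslash escape); both branches run the same pipeline. What that grep|cut pipeline extracts, restated in Go against the Cargo.lock line pinned earlier in this diff:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// First Cargo.lock line mentioning scroll-zkevm; the commit hash
	// follows the '#' in the pinned git source URL.
	line := `source = "git+https://github.com/scroll-tech/scroll-zkevm?branch=fix/mpt_limit#cfe8b4e959d6e09b3a45b58e45589ae62988a729"`
	frag := line[strings.Index(line, "#")+1:]
	fmt.Println(frag[:7]) // cfe8b4e -> becomes ZK_VERSION
}
```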


@@ -9,17 +9,6 @@ make clean
make coordinator
```
## db config
* db settings in config
```bash
# DB_DSN: db data source name
export DB_DSN="postgres://admin:123456@localhost/test_db?sslmode=disable"
# DB_DRIVER: db driver name
export DB_DRIVER="postgres"
```
## Start
* use default ports and config.json


@@ -74,21 +74,22 @@ func (m *Manager) Register(ctx context.Context, authMsg *message.AuthMsg) (*rpc.
go func() {
defer func() {
m.freeRoller(pubkey)
log.Info("roller unregister", "name", authMsg.Identity.Name)
log.Info("roller unregister", "name", authMsg.Identity.Name, "pubkey", pubkey)
}()
for {
select {
case task := <-taskCh:
notifier.Notify(rpcSub.ID, task) //nolint
case <-rpcSub.Err():
case err := <-rpcSub.Err():
log.Warn("client stopped the ws connection", "name", authMsg.Identity.Name, "pubkey", pubkey, "err", err)
return
case <-notifier.Closed():
return
}
}
}()
log.Info("roller register", "name", authMsg.Identity.Name, "version", authMsg.Identity.Version)
log.Info("roller register", "name", authMsg.Identity.Name, "pubkey", pubkey, "version", authMsg.Identity.Version)
return rpcSub, nil
}
@@ -115,6 +116,5 @@ func (m *Manager) SubmitProof(proof *message.ProofMsg) (bool, error) {
}
defer m.freeTaskIDForRoller(pubkey, proof.ID)
log.Info("Received zk proof", "proof id", proof.ID, "result", true)
return true, nil
}
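The Register loop above follows go-ethereum's server-side pub/sub pattern. A compressed sketch of that shape — not the coordinator's actual code, and with error handling trimmed:

```go
package main

import (
	"context"

	"github.com/ethereum/go-ethereum/rpc"
)

type task struct{ ID string }

// subscribe creates a subscription from the request context and pushes
// tasks until the client drops the connection.
func subscribe(ctx context.Context, taskCh <-chan *task) (*rpc.Subscription, error) {
	notifier, supported := rpc.NotifierFromContext(ctx)
	if !supported {
		return nil, rpc.ErrNotificationsUnsupported
	}
	sub := notifier.CreateSubscription()
	go func() {
		for {
			select {
			case t := <-taskCh:
				notifier.Notify(sub.ID, t) //nolint:errcheck
			case <-sub.Err(): // client closed the ws connection
				return
			case <-notifier.Closed():
				return
			}
		}
	}()
	return sub, nil
}

func main() {}
```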

File diff suppressed because one or more lines are too long

Binary file not shown.


@@ -1,27 +0,0 @@
package client
import (
"context"
"github.com/scroll-tech/go-ethereum"
"scroll-tech/common/message"
)
// RequestToken generates token for roller
func (c *Client) RequestToken(ctx context.Context, authMsg *message.AuthMsg) (string, error) {
var token string
err := c.client.CallContext(ctx, &token, "roller_requestToken", authMsg)
return token, err
}
// RegisterAndSubscribe subscribe roller and register, verified by sign data.
func (c *Client) RegisterAndSubscribe(ctx context.Context, taskCh chan *message.TaskMsg, authMsg *message.AuthMsg) (ethereum.Subscription, error) {
return c.client.Subscribe(ctx, "roller", taskCh, "register", authMsg)
}
// SubmitProof get proof from roller.
func (c *Client) SubmitProof(ctx context.Context, proof *message.ProofMsg) (bool, error) {
var ok bool
return ok, c.client.CallContext(ctx, &ok, "roller_submitProof", proof)
}


@@ -3,7 +3,10 @@ package client
import (
"context"
"github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/rpc"
"scroll-tech/common/message"
)
// Client defines typed wrappers for the Ethereum RPC API.
@@ -29,3 +32,21 @@ func DialContext(ctx context.Context, rawurl string) (*Client, error) {
func NewClient(c *rpc.Client) *Client {
return &Client{client: c}
}
// RequestToken generates a token for the roller.
func (c *Client) RequestToken(ctx context.Context, authMsg *message.AuthMsg) (string, error) {
var token string
err := c.client.CallContext(ctx, &token, "roller_requestToken", authMsg)
return token, err
}
// RegisterAndSubscribe registers the roller and subscribes it to tasks; registration is verified via the signed auth data.
func (c *Client) RegisterAndSubscribe(ctx context.Context, taskCh chan *message.TaskMsg, authMsg *message.AuthMsg) (ethereum.Subscription, error) {
return c.client.Subscribe(ctx, "roller", taskCh, "register", authMsg)
}
// SubmitProof submits a proof from the roller.
func (c *Client) SubmitProof(ctx context.Context, proof *message.ProofMsg) (bool, error) {
var ok bool
return ok, c.client.CallContext(ctx, &ok, "roller_submitProof", proof)
}

coordinator/cmd/app/app.go (new file, 134 lines)

@@ -0,0 +1,134 @@
package app
import (
"fmt"
"os"
"os/signal"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/database"
"scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/coordinator"
"scroll-tech/coordinator/config"
)
var (
// Set up Coordinator app info.
app *cli.App
)
func init() {
app = cli.NewApp()
app.Action = action
app.Name = "coordinator"
app.Usage = "The Scroll L2 Coordinator"
app.Version = version.Version
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Flags = append(app.Flags, apiFlags...)
app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx)
}
// Register `coordinator-test` app for integration-test.
utils.RegisterSimulation(app, "coordinator-test")
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
// init db connection
var ormFactory database.OrmFactory
if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
log.Crit("failed to init db connection", "err", err)
}
client, err := ethclient.Dial(cfg.L2Config.Endpoint)
if err != nil {
return err
}
// Initialize all coordinator modules.
rollerManager, err := coordinator.New(ctx.Context, cfg.RollerManagerConfig, ormFactory, client)
if err != nil {
return err
}
defer func() {
rollerManager.Stop()
err = ormFactory.Close()
if err != nil {
log.Error("can not close ormFactory", "error", err)
}
}()
// Start all modules.
if err = rollerManager.Start(); err != nil {
log.Crit("couldn't start roller manager", "error", err)
}
apis := rollerManager.APIs()
// Register api and start rpc service.
if ctx.Bool(httpEnabledFlag.Name) {
handler, addr, err := utils.StartHTTPEndpoint(
fmt.Sprintf(
"%s:%d",
ctx.String(httpListenAddrFlag.Name),
ctx.Int(httpPortFlag.Name)),
apis)
if err != nil {
log.Crit("Could not start RPC api", "error", err)
}
defer func() {
_ = handler.Shutdown(ctx.Context)
log.Info("HTTP endpoint closed", "url", fmt.Sprintf("http://%v/", addr))
}()
log.Info("HTTP endpoint opened", "url", fmt.Sprintf("http://%v/", addr))
}
// Register api and start ws service.
if ctx.Bool(wsEnabledFlag.Name) {
handler, addr, err := utils.StartWSEndpoint(
fmt.Sprintf(
"%s:%d",
ctx.String(wsListenAddrFlag.Name),
ctx.Int(wsPortFlag.Name)),
apis)
if err != nil {
log.Crit("Could not start WS api", "error", err)
}
defer func() {
_ = handler.Shutdown(ctx.Context)
log.Info("WS endpoint closed", "url", fmt.Sprintf("ws://%v/", addr))
}()
log.Info("WS endpoint opened", "url", fmt.Sprintf("ws://%v/", addr))
}
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
return nil
}
// Run runs the coordinator.
func Run() {
// Run the coordinator app.
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}


@@ -0,0 +1,19 @@
package app
import (
"fmt"
"testing"
"time"
"scroll-tech/common/cmd"
"scroll-tech/common/version"
)
func TestRunCoordinator(t *testing.T) {
coordinator := cmd.NewCmd(t, "coordinator-test", "--version")
defer coordinator.WaitExit()
// wait result
coordinator.ExpectWithTimeout(true, time.Second*3, fmt.Sprintf("coordinator version %s", version.Version))
coordinator.RunApp(nil)
}


@@ -1,13 +1,8 @@
package main
package app
import "github.com/urfave/cli/v2"
var (
verifierMockFlag = cli.BoolFlag{
Name: "verifier.mock",
Usage: "Mock the verifier",
Value: false,
}
apiFlags = []cli.Flag{
// http flags
&httpEnabledFlag,


@@ -1,125 +1,7 @@
package main
import (
"fmt"
"os"
"os/signal"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/database"
"scroll-tech/coordinator"
"scroll-tech/coordinator/config"
)
import "scroll-tech/coordinator/cmd/app"
func main() {
// Set up Coordinator app info.
app := cli.NewApp()
app.Action = action
app.Name = "coordinator"
app.Usage = "The Scroll L2 Coordinator"
app.Version = version.Version
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Flags = append(app.Flags, apiFlags...)
app.Flags = append(app.Flags, &verifierMockFlag)
app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx)
}
// Run the coordinator.
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}
func applyConfig(ctx *cli.Context, cfg *config.Config) {
if ctx.IsSet(verifierMockFlag.Name) {
cfg.RollerManagerConfig.Verifier = &config.VerifierConfig{MockMode: ctx.Bool(verifierMockFlag.Name)}
}
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
applyConfig(ctx, cfg)
// init db connection
var ormFactory database.OrmFactory
if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
log.Crit("failed to init db connection", "err", err)
}
// Initialize all coordinator modules.
rollerManager, err := coordinator.New(ctx.Context, cfg.RollerManagerConfig, ormFactory)
if err != nil {
return err
}
defer func() {
rollerManager.Stop()
err = ormFactory.Close()
if err != nil {
log.Error("can not close ormFactory", err)
}
}()
// Start all modules.
if err = rollerManager.Start(); err != nil {
log.Crit("couldn't start coordinator", "error", err)
}
apis := rollerManager.APIs()
// Register api and start rpc service.
if ctx.Bool(httpEnabledFlag.Name) {
handler, addr, err := utils.StartHTTPEndpoint(
fmt.Sprintf(
"%s:%d",
ctx.String(httpListenAddrFlag.Name),
ctx.Int(httpPortFlag.Name)),
apis)
if err != nil {
log.Crit("Could not start HTTP api", "error", err)
}
defer func() {
_ = handler.Shutdown(ctx.Context)
log.Info("HTTP endpoint closed", "url", fmt.Sprintf("http://%v/", addr))
}()
log.Info("HTTP endpoint opened", "url", fmt.Sprintf("http://%v/", addr))
}
if ctx.Bool(wsEnabledFlag.Name) {
handler, addr, err := utils.StartWSEndpoint(
fmt.Sprintf(
"%s:%d",
ctx.String(wsListenAddrFlag.Name),
ctx.Int(wsPortFlag.Name)),
apis)
if err != nil {
log.Crit("Could not start WS api", "error", err)
}
defer func() {
_ = handler.Shutdown(ctx.Context)
log.Info("WS endpoint closed", "url", fmt.Sprintf("ws://%v/", addr))
}()
log.Info("WS endpoint opened", "url", fmt.Sprintf("ws://%v/", addr))
}
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
return nil
app.Run()
}


@@ -13,5 +13,8 @@
"db_config": {
"driver_name": "postgres",
"dsn": "postgres://admin:123456@localhost/test?sslmode=disable"
},
"l2_config": {
"endpoint": "/var/lib/jenkins/workspace/SequencerPipeline/MyPrivateNetwork/geth.ipc"
}
}


@@ -7,8 +7,6 @@ import (
"path/filepath"
"strings"
"scroll-tech/common/utils"
db_config "scroll-tech/database"
)
@@ -26,10 +24,17 @@ type RollerManagerConfig struct {
TokenTimeToLive int `json:"token_time_to_live"`
}
// L2Config holds the l2geth configuration items.
type L2Config struct {
// l2geth node url.
Endpoint string `json:"endpoint"`
}
// Config load configuration items.
type Config struct {
RollerManagerConfig *RollerManagerConfig `json:"roller_manager_config"`
DBConfig *db_config.DBConfig `json:"db_config"`
L2Config *L2Config `json:"l2_config"`
}
// VerifierConfig load zk verifier config.
@@ -52,10 +57,6 @@ func NewConfig(file string) (*Config, error) {
return nil, err
}
// cover value by env fields
cfg.DBConfig.DSN = utils.GetEnvWithDefault("DB_DSN", cfg.DBConfig.DSN)
cfg.DBConfig.DriverName = utils.GetEnvWithDefault("DB_DRIVER", cfg.DBConfig.DriverName)
// Check roller's order session
order := strings.ToUpper(cfg.RollerManagerConfig.OrderSession)
if len(order) > 0 && !(order == "ASC" || order == "DESC") {


@@ -5,7 +5,7 @@ go 1.18
require (
github.com/orcaman/concurrent-map v1.0.0
github.com/patrickmn/go-cache v2.1.0+incompatible
github.com/scroll-tech/go-ethereum v1.10.14-0.20221125025612-4ea77a7577c6
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81
github.com/stretchr/testify v1.8.0
github.com/urfave/cli/v2 v2.10.2
golang.org/x/sync v0.1.0
@@ -17,23 +17,26 @@ require (
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set v1.8.0 // indirect
github.com/ethereum/go-ethereum v1.10.23 // indirect
github.com/ethereum/go-ethereum v1.10.26 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-stack/stack v1.8.0 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/iden3/go-iden3-crypto v0.0.13 // indirect
github.com/kr/pretty v0.3.0 // indirect
github.com/mattn/go-isatty v0.0.14 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/rogpeppe/go-internal v1.8.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
github.com/scroll-tech/zktrie v0.3.0 // indirect
github.com/scroll-tech/zktrie v0.3.1 // indirect
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/tklauser/go-sysconf v0.3.10 // indirect
github.com/tklauser/numcpus v0.4.0 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90 // indirect
golang.org/x/sys v0.2.0 // indirect
golang.org/x/crypto v0.5.0 // indirect
golang.org/x/sys v0.4.0 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)


@@ -105,17 +105,20 @@ github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r
github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk=
github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y=
github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts=
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/ethereum/go-ethereum v1.10.13/go.mod h1:W3yfrFyL9C1pHcwY5hmRHVDaorTiQxhYBkKyu5mEDHw=
github.com/ethereum/go-ethereum v1.10.23 h1:Xk8XAT4/UuqcjMLIMF+7imjkg32kfVFKoeyQDaO2yWM=
github.com/ethereum/go-ethereum v1.10.23/go.mod h1:EYFyF19u3ezGLD4RqOkLq+ZCXzYbLoNDdZlMt7kyKFg=
github.com/ethereum/go-ethereum v1.10.26 h1:i/7d9RBBwiXCEuyduBQzJw/mKmnvzsN14jqBmytw72s=
github.com/ethereum/go-ethereum v1.10.26/go.mod h1:EYFyF19u3ezGLD4RqOkLq+ZCXzYbLoNDdZlMt7kyKFg=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
@@ -179,6 +182,8 @@ github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OI
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.5/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
@@ -187,15 +192,19 @@ github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/ad
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE=
github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs=
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
github.com/holiman/uint256 v1.2.0 h1:gpSYcPLWGv4sG43I2mVLiDZCNDh/EpGjSk8tmtxitHM=
github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v1.0.2/go.mod h1:0dxJBVBHqTMjIUMkESDTNgOOx/Mw5wYIfyFmdzSamkM=
github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ=
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/iden3/go-iden3-crypto v0.0.12/go.mod h1:swXIv0HFbJKobbQBtsB50G7IHr6PbTowutSew/iBEoo=
@@ -214,6 +223,7 @@ github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bS
github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0=
github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po=
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1CVv03EnqU1wYL2dFwXxW2An0az9JTl/ZsqXQeBlkU=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
@@ -257,6 +267,7 @@ github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIG
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
@@ -264,6 +275,8 @@ github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
@@ -271,6 +284,9 @@ github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsO
github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
@@ -322,19 +338,22 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc=
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg=
github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o=
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221125025612-4ea77a7577c6 h1:o0Gq2d8nus6x82apluA5RJwjlOca7LIlpAfxlyvQvxs=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221125025612-4ea77a7577c6/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/zktrie v0.3.0 h1:c0GRNELUyAtyuiwllQKT1XjNbs7NRGfguKouiyLfFNY=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81 h1:Gm18RZ9WTR2Dupumr60E2m1Noe+l9/lITt6iRyxxZoc=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/zktrie v0.3.0/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
github.com/scroll-tech/zktrie v0.3.1 h1:HlR+fMBdjXX1/7cUMqpUgGEhGy/3vN1JpwQ0ovg/Ys8=
github.com/scroll-tech/zktrie v0.3.1/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
@@ -349,6 +368,7 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 h1:Gb2Tyox57NRNuZ2d3rmvB3pcmbu7O1RS3m8WRx7ilrg=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -371,6 +391,7 @@ github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYa
github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM=
github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//o=
github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ=
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef h1:wHSqTBrZW24CsNJDfeh9Ex6Pm0Rcpc7qrgKBiL44vF4=
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs=
github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
github.com/urfave/cli/v2 v2.10.2 h1:x3p8awjp/2arX+Nl/G2040AZpOCHS/eMJJ1/a+mye4Y=
@@ -403,8 +424,8 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90 h1:Y/gsMcFOcR+6S6f3YeMKl5g+dZMEWqcz5Czj/GWYbkM=
golang.org/x/crypto v0.0.0-20220829220503-c86fa9a7ed90/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -513,13 +534,14 @@ golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210420205809-ac73e9fd8988/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.2.0 h1:ljd4t30dBnAvMZaQCevtY0xLLD0A+bRZXbgLMLU1F/A=
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -531,9 +553,11 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -620,6 +644,7 @@ gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=
gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=


@@ -11,6 +11,9 @@ import (
cmap "github.com/orcaman/concurrent-map"
"github.com/patrickmn/go-cache"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/rpc"
@@ -62,7 +65,6 @@ type Manager struct {
// A map containing sessions whose proofs failed to be generated or verified.
rollerPool cmap.ConcurrentMap
// TODO: once put into use, should add to graceful restart.
failedSessionInfos map[string]*SessionInfo
// A direct connection to the Halo2 verifier, used to verify
@@ -72,6 +74,9 @@ type Manager struct {
// db interface
orm database.OrmFactory
// l2geth client
*ethclient.Client
// Token cache
tokenCache *cache.Cache
// A mutex guarding registration
@@ -80,14 +85,13 @@ type Manager struct {
// New returns a new instance of Manager. The instance is not fully prepared,
// and still needs to be finalized and run by calling `manager.Start`.
func New(ctx context.Context, cfg *config.RollerManagerConfig, orm database.OrmFactory) (*Manager, error) {
func New(ctx context.Context, cfg *config.RollerManagerConfig, orm database.OrmFactory, client *ethclient.Client) (*Manager, error) {
v, err := verifier.NewVerifier(cfg.Verifier)
if err != nil {
return nil, err
}
log.Info("Start coordinator successfully.")
return &Manager{
ctx: ctx,
cfg: cfg,
@@ -96,6 +100,7 @@ func New(ctx context.Context, cfg *config.RollerManagerConfig, orm database.OrmF
failedSessionInfos: make(map[string]*SessionInfo),
verifier: v,
orm: orm,
Client: client,
tokenCache: cache.New(time.Duration(cfg.TokenTimeToLive)*time.Second, 1*time.Hour),
}, nil
}
@@ -189,8 +194,18 @@ func (m *Manager) restorePrevSessions() {
finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
}
m.sessions[sess.info.ID] = sess
log.Info("Coordinator restart reload sessions", "ID", sess.info.ID, "sess", sess.info)
go m.CollectProofs(sess.info.ID, sess)
log.Info("Coordinator restart reload sessions", "session start time", time.Unix(sess.info.StartTimestamp, 0))
for _, roller := range sess.info.Rollers {
log.Info(
"restore roller info for session",
"session id", sess.info.ID,
"roller name", roller.Name,
"public key", roller.PublicKey,
"proof status", roller.Status)
}
go m.CollectProofs(sess)
}
}
}
@@ -214,17 +229,29 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
proofTimeSec := uint64(time.Since(time.Unix(sess.info.StartTimestamp, 0)).Seconds())
// Ensure this roller is eligible to participate in the session.
if roller, ok := sess.info.Rollers[pk]; !ok {
return fmt.Errorf("roller %s is not eligible to partake in proof session %v", pk, msg.ID)
} else if roller.Status == orm.RollerProofValid {
roller, ok := sess.info.Rollers[pk]
if !ok {
return fmt.Errorf("roller %s (%s) is not eligible to partake in proof session %v", roller.Name, roller.PublicKey, msg.ID)
}
if roller.Status == orm.RollerProofValid {
// In order to prevent DoS attacks, it is forbidden to repeatedly submit valid proofs.
// TODO: Defend invalid proof resubmissions by one of the following two methods:
// (i) slash the roller for each submission of invalid proof
// (ii) set the maximum failure retry times
log.Warn("roller has already submitted valid proof in proof session", "roller", pk, "proof id", msg.ID)
log.Warn(
"roller has already submitted valid proof in proof session",
"roller name", roller.Name,
"roller pk", roller.PublicKey,
"proof id", msg.ID,
)
return nil
}
log.Info("Received zk proof", "proof id", msg.ID)
log.Info(
"handling zk proof",
"proof id", msg.ID,
"roller name", roller.Name,
"roller pk", roller.PublicKey,
)
defer func() {
// TODO: maybe we should use db tx for the whole process?
@@ -244,12 +271,13 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
}()
if msg.Status != message.StatusOk {
log.Error("Roller failed to generate proof", "msg.ID", msg.ID, "error", msg.Error)
if dbErr = m.orm.UpdateProvingStatus(msg.ID, orm.ProvingTaskFailed); dbErr != nil {
log.Error("failed to update task status as failed", "error", dbErr)
}
// record the failed session.
m.addFailedSession(sess, msg.Error)
log.Error(
"Roller failed to generate proof",
"msg.ID", msg.ID,
"roller name", roller.Name,
"roller pk", roller.PublicKey,
"error", msg.Error,
)
return nil
}
@@ -274,8 +302,6 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
success, err = m.verifier.VerifyProof(msg.Proof)
if err != nil {
// record failed session.
m.addFailedSession(sess, err.Error())
// TODO: this is only a temp workaround for testnet, we should return err in real cases
success = false
log.Error("Failed to verify zk proof", "proof id", msg.ID, "error", err)
@@ -284,35 +310,27 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
         log.Info("Verify zk proof successfully", "verification result", success, "proof id", msg.ID)
     }
-    var status orm.ProvingStatus
     if success {
-        status = orm.ProvingTaskVerified
-    } else {
-        // Set status as skipped if verification fails.
-        // Note that this is only a workaround for testnet here.
-        // TODO: In real cases we should reset to orm.ProvingTaskUnassigned
-        // so as to re-distribute the task in the future
-        status = orm.ProvingTaskFailed
+        if dbErr = m.orm.UpdateProvingStatus(msg.ID, orm.ProvingTaskVerified); dbErr != nil {
+            log.Error(
+                "failed to update proving_status",
+                "msg.ID", msg.ID,
+                "status", orm.ProvingTaskVerified,
+                "error", dbErr)
+        }
+        return dbErr
     }
-    if dbErr = m.orm.UpdateProvingStatus(msg.ID, status); dbErr != nil {
-        log.Error("failed to update proving_status", "msg.ID", msg.ID, "status", status, "error", dbErr)
-    }
-    return dbErr
+    return nil
 }
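Note the shape of the change across the last three hunks: the per-proof path no longer calls addFailedSession or writes ProvingTaskFailed, since with multiple rollers per session one failed roller should not fail the whole task. Only a verified proof updates proving_status here; the failure decision is deferred to CollectProofs, which marks the task failed once every roller has reported an invalid proof (see isSessionFailed below). A condensed, self-contained sketch of the new per-proof tail, with a hypothetical update callback in place of the orm:

    package main

    import "fmt"

    // finishProof is a condensed sketch of handleZkProof's tail after this diff:
    // only a successful verification writes the verified status; on failure it
    // returns nil and leaves the session-level decision to CollectProofs.
    func finishProof(verified bool, taskID string, update func(id, status string) error) error {
        if verified {
            return update(taskID, "verified")
        }
        return nil // per-roller failure is recorded elsewhere; session judged later
    }

    func main() {
        update := func(id, status string) error {
            fmt.Println("proving_status of", id, "->", status)
            return nil
        }
        _ = finishProof(true, "batch-42", update)  // writes verified
        _ = finishProof(false, "batch-42", update) // no status write here
    }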
 // CollectProofs collects proofs corresponding to a proof generation session.
-func (m *Manager) CollectProofs(id string, sess *session) {
-    timer := time.NewTimer(time.Duration(m.cfg.CollectionTime) * time.Minute)
+func (m *Manager) CollectProofs(sess *session) {
     for {
         select {
-        case <-timer.C:
+        case <-time.After(time.Duration(m.cfg.CollectionTime) * time.Minute):
             m.mu.Lock()
             // Ensure proper clean-up of resources.
             defer func() {
-                delete(m.sessions, id)
+                delete(m.sessions, sess.info.ID)
                 m.mu.Unlock()
             }()
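One behavioral subtlety in this hunk: the original time.NewTimer armed a single deadline measured from the start of CollectProofs, whereas time.After inside the for/select creates a fresh timer on every iteration, so each proof received on finishChan effectively pushes the collection deadline back by another full CollectionTime. A runnable sketch of the difference, using a toy event channel:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        events := make(chan int)
        go func() {
            for i := 0; i < 5; i++ {
                time.Sleep(40 * time.Millisecond)
                events <- i
            }
        }()

        // NewTimer: one fixed deadline ~100ms after arming, however many events arrive.
        timer := time.NewTimer(100 * time.Millisecond)
        for {
            select {
            case <-timer.C:
                fmt.Println("fixed deadline hit")
                return
            // With `case <-time.After(100 * time.Millisecond):` here instead, a
            // fresh timer is created on every loop iteration, so steady events
            // would keep postponing the deadline.
            case i := <-events:
                fmt.Println("event", i)
            }
        }
    }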
@@ -329,13 +347,13 @@ func (m *Manager) CollectProofs(id string, sess *session) {
             // record failed session.
             errMsg := "proof generation session ended without receiving any valid proofs"
             m.addFailedSession(sess, errMsg)
-            log.Warn(errMsg, "session id", id)
+            log.Warn(errMsg, "session id", sess.info.ID)
             // Set status as skipped.
             // Note that this is only a workaround for testnet here.
             // TODO: In real cases we should reset to orm.ProvingTaskUnassigned
             // so as to re-distribute the task in the future
-            if err := m.orm.UpdateProvingStatus(id, orm.ProvingTaskFailed); err != nil {
-                log.Error("fail to reset task_status as Unassigned", "id", id, "err", err)
+            if err := m.orm.UpdateProvingStatus(sess.info.ID, orm.ProvingTaskFailed); err != nil {
+                log.Error("fail to set task_status as failed", "id", sess.info.ID, "err", err)
             }
             return
         }
@@ -345,17 +363,32 @@ func (m *Manager) CollectProofs(id string, sess *session) {
             _ = participatingRollers[randIndex]
             // TODO: reward winner
             return
         case ret := <-sess.finishChan:
             m.mu.Lock()
             sess.info.Rollers[ret.pk].Status = ret.status
-            m.mu.Unlock()
+            if m.isSessionFailed(sess.info) {
+                if err := m.orm.UpdateProvingStatus(ret.id, orm.ProvingTaskFailed); err != nil {
+                    log.Error("failed to update proving_status as failed", "msg.ID", ret.id, "error", err)
+                }
+            }
             if err := m.orm.SetSessionInfo(sess.info); err != nil {
                 log.Error("db set session info fail", "pk", ret.pk, "error", err)
             }
+            m.mu.Unlock()
         }
     }
 }

+func (m *Manager) isSessionFailed(info *orm.SessionInfo) bool {
+    for _, roller := range info.Rollers {
+        if roller.Status != orm.RollerProofInvalid {
+            return false
+        }
+    }
+    return true
+}
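isSessionFailed is the session-level counterpart to the per-proof change in handleZkProof: a task is marked failed only once all rollers have submitted invalid proofs; any roller still assigned, or holding a valid proof, keeps the session alive. Note the predicate is vacuously true for an empty roller map, which is safe here because StartProofGenerationSession below refuses to create a session with zero assigned rollers. A self-contained illustration with local stand-ins for the orm statuses:

    package main

    import "fmt"

    type rollerStatus int

    const (
        rollerAssigned rollerStatus = iota
        rollerProofValid
        rollerProofInvalid
    )

    // sessionFailed mirrors Manager.isSessionFailed: true only when *every*
    // roller has submitted an invalid proof.
    func sessionFailed(statuses []rollerStatus) bool {
        for _, s := range statuses {
            if s != rollerProofInvalid {
                return false
            }
        }
        return true
    }

    func main() {
        fmt.Println(sessionFailed([]rollerStatus{rollerProofInvalid, rollerProofInvalid})) // true
        fmt.Println(sessionFailed([]rollerStatus{rollerProofInvalid, rollerAssigned}))     // false: one roller still working
        fmt.Println(sessionFailed(nil))                                                    // true (vacuously: no rollers)
    }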
 // APIs collect API services.
 func (m *Manager) APIs() []rpc.API {
     return []rpc.API{
@@ -373,65 +406,101 @@ func (m *Manager) APIs() []rpc.API {
 }
 // StartProofGenerationSession starts a proof generation session
-func (m *Manager) StartProofGenerationSession(task *orm.BlockBatch) bool {
-    roller := m.selectRoller()
-    if roller == nil {
+func (m *Manager) StartProofGenerationSession(task *orm.BlockBatch) (success bool) {
+    if m.GetNumberOfIdleRollers() == 0 {
+        log.Warn("no idle roller when starting proof generation session", "id", task.ID)
         return false
     }
     log.Info("start proof generation session", "id", task.ID)
-    var dbErr error
     defer func() {
-        if dbErr != nil {
-            log.Error("StartProofGenerationSession", "dbErr", dbErr)
+        if !success {
             if err := m.orm.UpdateProvingStatus(task.ID, orm.ProvingTaskUnassigned); err != nil {
-                log.Error("fail to reset task_status as Unassigned", "id", task.ID, "dbErr", dbErr, "err", err)
+                log.Error("fail to reset task_status as Unassigned", "id", task.ID, "err", err)
             }
         }
     }()
-    traces, err := m.orm.GetBlockTraces(map[string]interface{}{"batch_id": task.ID})
+    // Get block traces.
+    blockInfos, err := m.orm.GetBlockInfos(map[string]interface{}{"batch_id": task.ID})
     if err != nil {
         log.Error(
-            "could not GetBlockTraces",
+            "could not GetBlockInfos",
             "batch_id", task.ID,
             "error", err,
         )
         return false
     }
-    log.Info("roller is picked", "name", roller.Name, "public_key", roller.PublicKey)
-    // send trace to roller
-    roller.sendTask(task.ID, traces)
-    pk := roller.PublicKey
-    sessionInfo := &orm.SessionInfo{
-        ID: task.ID,
-        Rollers: map[string]*orm.RollerStatus{
-            pk: {
-                PublicKey: pk,
-                Name:      roller.Name,
-                Status:    orm.RollerAssigned,
-            },
-        },
-        StartTimestamp: time.Now().Unix(),
+    traces := make([]*types.BlockTrace, len(blockInfos))
+    for i, blockInfo := range blockInfos {
+        traces[i], err = m.Client.GetBlockTraceByHash(m.ctx, common.HexToHash(blockInfo.Hash))
+        if err != nil {
+            log.Error(
+                "could not GetBlockTraceByHash",
+                "block number", blockInfo.Number,
+                "block hash", blockInfo.Hash,
+                "error", err,
+            )
+            return false
+        }
     }
-    if err := m.orm.SetSessionInfo(sessionInfo); err != nil {
-        log.Error("db set session info fail", "pk", pk, "error", err)
+    // Dispatch task to rollers.
+    rollers := make(map[string]*orm.RollerStatus)
+    for i := 0; i < int(m.cfg.RollersPerSession); i++ {
+        roller := m.selectRoller()
+        if roller == nil {
+            log.Info("selectRoller returns nil")
+            break
+        }
+        log.Info("roller is picked", "session id", task.ID, "name", roller.Name, "public key", roller.PublicKey)
+        // send trace to roller
+        if !roller.sendTask(task.ID, traces) {
+            log.Error("send task failed", "roller name", roller.Name, "public key", roller.PublicKey, "id", task.ID)
+            continue
+        }
+        rollers[roller.PublicKey] = &orm.RollerStatus{PublicKey: roller.PublicKey, Name: roller.Name, Status: orm.RollerAssigned}
     }
+    // No roller assigned.
+    if len(rollers) == 0 {
+        log.Error("no roller assigned", "id", task.ID, "number of idle rollers", m.GetNumberOfIdleRollers())
+        return false
+    }
+    // Update session proving status as assigned.
+    if err = m.orm.UpdateProvingStatus(task.ID, orm.ProvingTaskAssigned); err != nil {
+        log.Error("failed to update task status", "id", task.ID, "err", err)
+        return false
+    }
     // Create a proof generation session.
-    s := &session{
-        info:       sessionInfo,
+    sess := &session{
+        info: &orm.SessionInfo{
+            ID:             task.ID,
+            Rollers:        rollers,
+            StartTimestamp: time.Now().Unix(),
+        },
         finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
     }
-    m.mu.Lock()
-    m.sessions[task.ID] = s
-    m.mu.Unlock()
-    dbErr = m.orm.UpdateProvingStatus(task.ID, orm.ProvingTaskAssigned)
-    go m.CollectProofs(task.ID, s)
+    // Store session info.
+    if err = m.orm.SetSessionInfo(sess.info); err != nil {
+        log.Error("db set session info fail", "error", err)
+        for _, roller := range sess.info.Rollers {
+            log.Error(
+                "restore roller info for session",
+                "session id", sess.info.ID,
+                "roller name", roller.Name,
+                "public key", roller.PublicKey,
+                "proof status", roller.Status)
+        }
+        return false
+    }
+    m.mu.Lock()
+    m.sessions[task.ID] = sess
+    m.mu.Unlock()
+    go m.CollectProofs(sess)
     return true
 }
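Finally, a stripped-down sketch of the multi-roller dispatch loop introduced above, with hypothetical selectRoller/sendTask stand-ins for the Manager methods:

    package main

    import "fmt"

    // dispatch mirrors the new loop in StartProofGenerationSession: try up to n
    // rollers, tolerate individual sendTask failures, and report failure only
    // if no roller accepted the task.
    func dispatch(n int, selectRoller func() (pk string, ok bool), sendTask func(pk string) bool) ([]string, bool) {
        var assigned []string
        for i := 0; i < n; i++ {
            pk, found := selectRoller()
            if !found {
                break // idle roller pool exhausted
            }
            if !sendTask(pk) {
                continue // this roller rejected the task; try the next one
            }
            assigned = append(assigned, pk)
        }
        return assigned, len(assigned) > 0
    }

    func main() {
        idle := []string{"pk-a", "pk-b", "pk-c"}
        selectRoller := func() (string, bool) {
            if len(idle) == 0 {
                return "", false
            }
            pk := idle[0]
            idle = idle[1:]
            return pk, true
        }
        sendTask := func(pk string) bool { return pk != "pk-b" } // pretend pk-b is unreachable
        assigned, ok := dispatch(2, selectRoller, sendTask)
        fmt.Println(assigned, ok) // [pk-a] true
    }

As in the diff, a failed send still consumes one of the n attempts; the loop does not top up with extra rollers, so a session may start with fewer than RollersPerSession participants as long as at least one send succeeds.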