Compare commits

87 Commits

Author SHA1 Message Date
Péter Garamvölgyi
97dded9619 improve logs 2023-01-25 17:19:03 +01:00
Péter Garamvölgyi
f48762bf33 Revert "more debug logs"
This reverts commit 42190feb6c.
2023-01-25 16:48:50 +01:00
Péter Garamvölgyi
42190feb6c more debug logs 2023-01-25 16:32:41 +01:00
Péter Garamvölgyi
56080204c5 add more logs 2023-01-25 16:29:34 +01:00
Péter Garamvölgyi
2674dfaf69 change log level 2023-01-25 16:14:19 +01:00
Péter Garamvölgyi
5bffb151a3 use correct parent hash format 2023-01-25 15:46:35 +01:00
Péter Garamvölgyi
fadaec7add retry commitBatch with manual gas estimation 2023-01-25 12:19:25 +01:00
Péter Garamvölgyi
16576b6f53 fix(bridge): Handle duplicate messages (#261) 2023-01-20 12:09:47 +01:00
HAOYUatHZ
aa885f068f docs(db): fix sql comments (#260) 2023-01-20 09:44:19 +08:00
HAOYUatHZ
1f764a579d refactor(bridge): refactor layer2MessageOrm.GetL2Messages() (#243) 2023-01-20 09:29:34 +08:00
HAOYUatHZ
91ee767669 doc(db): update sql comment (#259) 2023-01-20 09:27:50 +08:00
Péter Garamvölgyi
7eac41691e feat(bridge): handle expired messages correctly (#257) 2023-01-19 23:53:34 +01:00
Péter Garamvölgyi
d9516890b0 feat(bridge): handle expired messages (#256) 2023-01-19 23:21:17 +01:00
Péter Garamvölgyi
ddb96bb732 feat(bridge): add more l1 relayer logs (#255) 2023-01-19 22:37:02 +01:00
Péter Garamvölgyi
e419dd8d5c fix(bridge): add limit to GetL1MessagesByStatus (#254) 2023-01-19 22:19:48 +01:00
Péter Garamvölgyi
c99c65bdfd fix(bridge): execute watcher loops independently (#253) 2023-01-19 21:14:45 +01:00
colin
18fd7f56a8 fix(coordinator): reset roller state when proof session timeout (#210)
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-01-19 21:37:30 +08:00
Péter Garamvölgyi
a319dc1cff bugfix(bridge): only relay messages for finalized batches (#251)
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
2023-01-19 20:54:09 +08:00
colin
52bf3a55fc fix(coordinator): fix CollectProofs for multi-roller & add tests (#252)
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
2023-01-19 14:52:20 +08:00
Péter Garamvölgyi
598e10e4fc feat(bridge): update skipped batches in a single db operation (#242) 2023-01-18 15:21:41 +01:00
maskpp
eed3f42731 fix(coordinator): Fix bug in coordinator.GetNumberOfIdleRollers function. (#247)
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
2023-01-18 20:51:19 +08:00
colin
5a4bea8ccd fix(coordinator): set session failed when all rollers have submitted invalid proof (#241)
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-01-18 20:01:43 +08:00
ChuhanJin
5b37b63d89 build(jenkinsfile): replace status_check with comment in coverage test (#249)
Co-authored-by: vincent <419436363@qq.com>
2023-01-18 19:44:47 +08:00
HAOYUatHZ
5e5c4f7701 build(roller&coordinator): fix Makefile (#245)
Co-authored-by: Lawliet-Chan <1576710154@qq.com>
2023-01-18 16:51:55 +08:00
HAOYUatHZ
09dc638652 perf(db): add limit in block_batch queries (#240) 2023-01-17 20:46:51 +08:00
HAOYUatHZ
b598a01e7f build(jenkins): remove changeset condition (#239) 2023-01-17 07:07:58 +08:00
Scroll Dev
0fcdb6f824 doc: bump version number (#238) 2023-01-17 06:57:43 +08:00
Xi Lin
5a95dcf5ba bugfix(bridge&database): fix compute message hash and add unit tests (#233)
Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
2023-01-17 06:56:21 +08:00
Xi Lin
d0c63e75df bugfix(database): make sure return order of statuses in GetRollupStatusByIDList (#235)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2023-01-16 20:57:27 +08:00
Scroll Dev
676b8a2230 Update pull_request_template.md 2023-01-15 07:46:57 +08:00
Xi Lin
a1cb3d3b87 fix(contracts&libzkp): fix mpt circuits panic (#232)
Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
Co-authored-by: Lawliet-Chan <1576710154@qq.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-01-14 14:58:37 +08:00
colin
47b4c54e05 fix(relayer): use sync map for concurrent write (#231)
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
2023-01-14 12:07:59 +08:00
maskpp
fe822a65b9 refactor(bridge): refactor bridge parallelization (#213)
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
2023-01-14 09:40:07 +08:00
HAOYUatHZ
9bd4931f93 chore: fix typos in pull_request template (#230) 2023-01-13 11:50:39 +08:00
HAOYUatHZ
411cb19b62 chore: add pull_request template (#229) 2023-01-13 11:46:19 +08:00
HAOYUatHZ
8f55299941 Revert "refactor(coordinator): remove debug api namespace from manager" (#224)
Co-authored-by: Nazarii Denha <dengaaa2002@gmail.com>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2023-01-13 08:41:36 +08:00
HAOYUatHZ
0bdcce79ba chore: bump version number (#226) 2023-01-13 08:33:53 +08:00
Péter Garamvölgyi
fcd29c305d fix(coordinator): use uint32 for timestamp to enable RLP encoding (#225) 2023-01-12 18:24:59 +01:00
Lawliet-Chan
54a6ab472a feat(message): replace json.Marshal with rlp.Encode for Signing (#219) 2023-01-12 22:21:59 +08:00
HAOYUatHZ
b2a5baa2ad feat(bridge): upgrade TraceGasCost estimation (#222) 2023-01-12 16:45:11 +08:00
Lawliet-Chan
dc6b71ca23 chore: improve golang lint rules (#172)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-01-12 12:55:00 +08:00
Nazarii Denha
e1247a7eb2 refactor(coordinator): remove debug api namespace from manager (#221)
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
2023-01-12 10:16:48 +08:00
colin
65699b89bb feat(coordinator): support rollers-per-session (#215)
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
Co-authored-by: maskpp <maskpp266@gmail.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-01-11 21:49:37 +08:00
maskpp
a44956a05f fix(bug): fix data race in common/cmd module. (#220) 2023-01-11 21:05:46 +08:00
maskpp
b85b4bafc2 refactor(coordinator): refactor CollectProofs func (#211)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-01-11 13:31:42 +08:00
colin
2e3c80c580 feat(roller): add submit proof retry (#217)
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
Co-authored-by: Lawliet-Chan <1576710154@qq.com>
2023-01-11 12:30:33 +08:00
Péter Garamvölgyi
d24392feac feat(bridge): Propose batches, fetch blocks and events in parallel (#216) 2023-01-10 23:13:06 +08:00
ChuhanJin
5c6e20a774 build(Jenkins): add docker push job for tag (#214)
Co-authored-by: vincent <419436363@qq.com>
2023-01-10 14:43:20 +08:00
HAOYUatHZ
9f9e23ff0e chore(coordinator): add more logs (#177)
Co-authored-by: Lawliet-Chan <1576710154@qq.com>
2023-01-10 09:29:43 +08:00
Péter Garamvölgyi
fa93de97de feat(bridge): Relay messages and batches in parallel (#212)
Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-01-10 09:18:36 +08:00
HAOYUatHZ
deedf7a5d0 refactor(coordinator): refactor CollectProofs (#208) 2023-01-10 08:49:14 +08:00
Xi Lin
73432127cd feat(contract): only use blockHeight to verify relay message in layer1 (#209) 2023-01-09 15:03:30 +08:00
colin
a78160ddad fix(roller): avoid discarding task when connection failure (#203)
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
Co-authored-by: Lawliet-Chan <1576710154@qq.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-01-09 10:52:50 +08:00
Lawliet-Chan
fff2517a76 build(libzkp): update dependency common-rs -> scroll-zkevm (#204)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-01-09 09:32:14 +08:00
Xi Lin
eba7647e21 bugfix(bridge): add suffix to id in processingFinalization and processingCommitment (#207)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2023-01-06 14:33:21 +01:00
Xi Lin
51076d21c3 bugfix(bridge): failed to fetch rollup events on restart (#201) 2023-01-06 20:39:09 +08:00
maskpp
077ed9839a feat(bug): fix bug in l2 watcher (#199) 2023-01-05 17:32:50 +08:00
colinlyguo
bdcca55bd5 fix: roller test path (#202)
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
2023-01-05 11:41:45 +08:00
Xi Lin
20b8e2bf6c feat(contract): add finalization status api in contract (#197)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-01-04 23:26:47 +08:00
ChuhanJin
cc596c42b3 build: fix rate limit failure by using solc static bin (#200)
Co-authored-by: vincent <419436363@qq.com>
2023-01-04 21:52:10 +08:00
HAOYUatHZ
7da717b251 chore(bridge): update l2 contractEventsBlocksFetchLimit (#198) 2023-01-04 15:47:35 +08:00
HAOYUatHZ
bbdbf3995f feat(bridge): fast catch up new blocks (#184)
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
2023-01-04 15:43:14 +08:00
HAOYUatHZ
7fb8bc6e29 fix(test): fix TestCmd (#194) 2023-01-02 19:38:37 +08:00
HAOYUatHZ
b8fae294e4 build(Jenkins): use parallel testing (#169)
Co-authored-by: Scroll Dev <dev@scroll.io>
Co-authored-by: colinlyguo <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
2022-12-29 16:59:06 +08:00
maskpp
23bc381f5c fix(integration-test): fixtestStartProcess (#193) 2022-12-29 16:43:06 +08:00
maskpp
b4ade85a9c feat(roller): If roller is stopped, don't need to retry again. (#191)
Co-authored-by: vincent <419436363@qq.com>
2022-12-28 16:23:29 +08:00
HAOYUatHZ
d04522027c refactor: clean up codes (#187)
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
Co-authored-by: maskpp <maskpp266@gmail.com>
2022-12-28 10:55:56 +08:00
ChuhanJin
7422bea51f build(jenkinsfile): add coverage (#181)
Co-authored-by: vincent <419436363@qq.com>
Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
2022-12-27 22:18:17 +08:00
Lawliet-Chan
abcc159390 chore(libzkp): short writings makefile in roller and coordinator (#189) 2022-12-27 18:58:09 +08:00
HAOYUatHZ
a545954dbc chore(bridge): increase blockTracesFetchLimit & `contractEventsBlock… (#185) 2022-12-26 17:55:03 +08:00
HAOYUatHZ
dc6ef83fbd chore: bump version number (#182) 2022-12-26 14:32:11 +08:00
HAOYUatHZ
e17647bc9f chore(batch_proposer): add more check for batchTxNumThreshold (#180) 2022-12-26 14:16:39 +08:00
HAOYUatHZ
feaa95aefe Revert "feat(libzkp): unwind panic (#163)" (#179) 2022-12-26 11:50:27 +08:00
HAOYUatHZ
8ad8a1b6f0 feat(batch_proposer): add batchTxNumThreshold (#178) 2022-12-26 10:17:16 +08:00
maskpp
22f6781c26 feat(Integration test): Enable run child process in integration-test test cases. (#102)
Co-authored-by: colinlyguo <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2022-12-22 22:13:04 +08:00
HAOYUatHZ
b165402e81 refactor(bridge): refactor watcher confirmation check and add more logs (#173)
Co-authored-by: Scroll Dev <dev@scroll.io>
2022-12-22 21:26:52 +08:00
HAOYUatHZ
9ee8d977cb build: revert coverage test (#176) 2022-12-22 20:10:32 +08:00
HAOYUatHZ
e1a6eb65f6 build: decrease coverage threshold to 40% (#175) 2022-12-22 15:31:05 +08:00
maskpp
9096334eab chore(database): rename block_result to block_trace (#174) 2022-12-22 13:47:59 +08:00
HAOYUatHZ
c360cf52b1 build: improve dockerfiles (#171) 2022-12-20 11:04:18 +08:00
HAOYUatHZ
00dc075d9c refactor: clean up codes (#168) 2022-12-19 20:06:00 +08:00
ChuhanJin
af77c9fa83 build(jenkinsfile): add coverage (#139)
Co-authored-by: vincent <419436363@qq.com>
2022-12-19 17:07:10 +08:00
maskpp
1c4ed0487a fix(tests): fix docker workflow for tests (#152)
Co-authored-by: colinlyguo <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2022-12-19 14:49:01 +08:00
HAOYUatHZ
e4761d9694 refactor(roller): reorg directory (#167) 2022-12-19 11:16:40 +08:00
maskpp
fbc7e03c67 fix(test): Fix weak test code in coordinator module (#165)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2022-12-16 15:06:46 +08:00
HAOYUatHZ
927011641d refactor(bridge): remove L2Watcher ReplayBlockResultByHash API (#144)
Co-authored-by: maskpp <maskpp266@gmail.com>
2022-12-16 10:45:46 +08:00
Lawliet-Chan
6eb71869e8 feat(libzkp): unwind panic (#163)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2022-12-15 20:26:38 +08:00
135 changed files with 4738 additions and 1804 deletions

.github/pull_request_template.md (new file, 7 lines)

@@ -0,0 +1,7 @@
1. Purpose or design rationale of this PR
2. Does this PR involve a new deployment, and involve a new git tag & docker image tag? If so, has `tag` in `common/version.go` been updated?
3. Is this PR a breaking change? If so, has a `breaking-change` label been attached?


@@ -31,7 +31,11 @@ jobs:
- name: Checkout code
uses: actions/checkout@v2
- name: Install Solc
uses: pontem-network/get-solc@master
uses: supplypike/setup-bin@v3
with:
uri: 'https://github.com/ethereum/solidity/releases/download/v0.8.16/solc-static-linux'
name: 'solc'
version: '0.8.16'
- name: Install Geth Tools
uses: gacts/install-geth-tools@v1
- name: Lint

Jenkinsfile (89 changed lines)

@@ -8,6 +8,7 @@ pipeline {
}
tools {
go 'go-1.18'
nodejs "nodejs"
}
environment {
GO111MODULE = 'on'
@@ -16,17 +17,6 @@ pipeline {
}
stages {
stage('Build') {
when {
anyOf {
changeset "Jenkinsfile"
changeset "build/**"
changeset "go.work**"
changeset "bridge/**"
changeset "coordinator/**"
changeset "common/**"
changeset "database/**"
}
}
parallel {
stage('Build Prerequisite') {
steps {
@@ -67,38 +57,63 @@ pipeline {
}
}
}
stage('Test') {
when {
anyOf {
changeset "Jenkinsfile"
changeset "build/**"
changeset "go.work**"
changeset "bridge/**"
changeset "coordinator/**"
changeset "common/**"
changeset "database/**"
stage('Parallel Test') {
parallel{
stage('Test bridge package') {
steps {
sh 'go test -v -race -coverprofile=coverage.bridge.txt -covermode=atomic -p 1 scroll-tech/bridge/...'
}
}
stage('Test common package') {
steps {
sh 'go test -v -race -coverprofile=coverage.common.txt -covermode=atomic -p 1 scroll-tech/common/...'
}
}
stage('Test coordinator package') {
steps {
sh 'go test -v -race -coverprofile=coverage.coordinator.txt -covermode=atomic -p 1 scroll-tech/coordinator/...'
}
}
stage('Test database package') {
steps {
sh 'go test -v -race -coverprofile=coverage.db.txt -covermode=atomic -p 1 scroll-tech/database/...'
}
}
stage('Integration test') {
steps {
sh 'go test -v -race -tags="mock_prover mock_verifier" -coverprofile=coverage.integration.txt -covermode=atomic -p 1 scroll-tech/integration-test/...'
}
}
stage('Race test bridge package') {
steps {
sh "cd bridge && go test -v -race -coverprofile=coverage.txt -covermode=atomic \$(go list ./... | grep -v 'database\\|common\\|l1\\|l2\\|coordinator')"
}
}
stage('Race test coordinator package') {
steps {
sh "cd coordinator && go test -v -race -coverprofile=coverage.txt -covermode=atomic \$(go list ./... | grep -v 'database\\|common\\|l1\\|l2\\|coordinator')"
}
}
stage('Race test database package') {
steps {
sh "cd database && go test -v -race -coverprofile=coverage.txt -covermode=atomic \$(go list ./... | grep -v 'database\\|common\\|l1\\|l2\\|coordinator')"
}
}
}
}
stage('Compare Coverage') {
steps {
catchError(buildResult: 'FAILURE', stageResult: 'FAILURE') {
sh '''
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 scroll-tech/database/...
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 scroll-tech/bridge/...
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 scroll-tech/common/...
go test -v -race -coverprofile=coverage.txt -covermode=atomic -p 1 scroll-tech/coordinator/...
cd ..
'''
script {
for (i in ['bridge', 'coordinator', 'database']) {
sh "cd $i && go test -v -race -coverprofile=coverage.txt -covermode=atomic \$(go list ./... | grep -v 'database\\|l2\\|l1\\|common\\|coordinator')"
}
}
}
sh "./build/post-test-report-coverage.sh"
script {
currentBuild.result = 'SUCCESS'
}
step([$class: 'CompareCoverageAction', publishResultAs: 'Comment', scmVars: [GIT_URL: env.GIT_URL]])
}
}
}
post {
always {
post {
always {
publishCoverage adapters: [coberturaReportAdapter(path: 'cobertura.xml', thresholds: [[thresholdTarget: 'Aggregated Report', unhealthyThreshold: 40.0]])], checksName: '', sourceFileResolver: sourceFiles('NEVER_STORE')
cleanWs()
slackSend(message: "${JOB_BASE_NAME} ${GIT_COMMIT} #${BUILD_NUMBER} deploy ${currentBuild.result}")
}


@@ -5,7 +5,8 @@ IMAGE_VERSION=latest
REPO_ROOT_DIR=./..
mock_abi:
go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol mock_bridge/Mock_Bridge.sol --pkg mock_bridge --out mock_bridge/Mock_Bridge.go
go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol mock_bridge/MockBridgeL1.sol --pkg mock_bridge --out mock_bridge/MockBridgeL1.go
go run github.com/scroll-tech/go-ethereum/cmd/abigen --sol mock_bridge/MockBridgeL2.sol --pkg mock_bridge --out mock_bridge/MockBridgeL2.go
bridge: ## Builds the Bridge instance.
go build -o $(PWD)/build/bin/bridge ./cmd


@@ -22,16 +22,6 @@ make clean
make bridge
```
## DB config
* db settings in config
```bash
# DB_DSN: db data source name
export DB_DSN="postgres://admin:123456@localhost/test_db?sslmode=disable"
# DB_DRIVER: db driver name
export DB_DRIVER="postgres"
```
## Start
* use default ports and config.json


@@ -9,20 +9,20 @@ import (
)
const (
// SENT_MESSAGE_EVENT_SIGNATURE = keccak256("SentMessage(address,address,uint256,uint256,uint256,bytes,uint256,uint256)")
SENT_MESSAGE_EVENT_SIGNATURE = "806b28931bc6fbe6c146babfb83d5c2b47e971edb43b4566f010577a0ee7d9f4"
// SentMessageEventSignature = keccak256("SentMessage(address,address,uint256,uint256,uint256,bytes,uint256,uint256)")
SentMessageEventSignature = "806b28931bc6fbe6c146babfb83d5c2b47e971edb43b4566f010577a0ee7d9f4"
// RELAYED_MESSAGE_EVENT_SIGNATURE = keccak256("RelayedMessage(bytes32)")
RELAYED_MESSAGE_EVENT_SIGNATURE = "4641df4a962071e12719d8c8c8e5ac7fc4d97b927346a3d7a335b1f7517e133c"
// RelayedMessageEventSignature = keccak256("RelayedMessage(bytes32)")
RelayedMessageEventSignature = "4641df4a962071e12719d8c8c8e5ac7fc4d97b927346a3d7a335b1f7517e133c"
// FAILED_RELAYED_MESSAGE_EVENT_SIGNATURE = keccak256("FailedRelayedMessage(bytes32)")
FAILED_RELAYED_MESSAGE_EVENT_SIGNATURE = "99d0e048484baa1b1540b1367cb128acd7ab2946d1ed91ec10e3c85e4bf51b8f"
// FailedRelayedMessageEventSignature = keccak256("FailedRelayedMessage(bytes32)")
FailedRelayedMessageEventSignature = "99d0e048484baa1b1540b1367cb128acd7ab2946d1ed91ec10e3c85e4bf51b8f"
// COMMIT_BATCH_EVENT_SIGNATURE = keccak256("CommitBatch(bytes32,bytes32,uint256,bytes32)")
COMMIT_BATCH_EVENT_SIGNATURE = "a26d4bd91c4c2eff3b1bf542129607d782506fc1950acfab1472a20d28c06596"
// CommitBatchEventSignature = keccak256("CommitBatch(bytes32,bytes32,uint256,bytes32)")
CommitBatchEventSignature = "a26d4bd91c4c2eff3b1bf542129607d782506fc1950acfab1472a20d28c06596"
// FINALIZED_BATCH_EVENT_SIGNATURE = keccak256("FinalizeBatch(bytes32,bytes32,uint256,bytes32)")
FINALIZED_BATCH_EVENT_SIGNATURE = "e20f311a96205960de4d2bb351f7729e5136fa36ae64d7f736c67ddc4ca4cd4b"
// FinalizedBatchEventSignature = keccak256("FinalizeBatch(bytes32,bytes32,uint256,bytes32)")
FinalizedBatchEventSignature = "e20f311a96205960de4d2bb351f7729e5136fa36ae64d7f736c67ddc4ca4cd4b"
)
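
Each constant above is the keccak256 hash of the event's canonical signature and is used as topics[0] when filtering logs. A minimal sketch of how such a topic value can be reproduced, assuming the scroll-tech go-ethereum fork keeps upstream's crypto.Keccak256Hash helper (it is imported elsewhere in this diff):

package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/crypto"
)

func main() {
	// keccak256 of the canonical event signature yields the log topic; the
	// output (minus its 0x prefix) should equal SentMessageEventSignature above.
	sig := "SentMessage(address,address,uint256,uint256,uint256,bytes,uint256,uint256)"
	fmt.Println(crypto.Keccak256Hash([]byte(sig)).Hex())
}
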
var (

bridge/cmd/app/app.go (new file, 125 lines)

@@ -0,0 +1,125 @@
package app
import (
"fmt"
"os"
"os/signal"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/database"
"scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/bridge/config"
"scroll-tech/bridge/l1"
"scroll-tech/bridge/l2"
)
var (
app *cli.App
)
func init() {
// Set up Bridge app info.
app = cli.NewApp()
app.Action = action
app.Name = "bridge"
app.Usage = "The Scroll Bridge"
app.Version = version.Version
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Flags = append(app.Flags, apiFlags...)
app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx)
}
// Register `bridge-test` app for integration-test.
utils.RegisterSimulation(app, "bridge-test")
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
// init db connection
var ormFactory database.OrmFactory
if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
log.Crit("failed to init db connection", "err", err)
}
var (
l1Backend *l1.Backend
l2Backend *l2.Backend
)
// @todo change nil to actual client after https://scroll-tech/bridge/pull/40 merged
l1Backend, err = l1.New(ctx.Context, cfg.L1Config, ormFactory)
if err != nil {
return err
}
l2Backend, err = l2.New(ctx.Context, cfg.L2Config, ormFactory)
if err != nil {
return err
}
defer func() {
l1Backend.Stop()
l2Backend.Stop()
err = ormFactory.Close()
if err != nil {
log.Error("can not close ormFactory", "error", err)
}
}()
// Start all modules.
if err = l1Backend.Start(); err != nil {
log.Crit("couldn't start l1 backend", "error", err)
}
if err = l2Backend.Start(); err != nil {
log.Crit("couldn't start l2 backend", "error", err)
}
// Register api and start rpc service.
if ctx.Bool(httpEnabledFlag.Name) {
handler, addr, err := utils.StartHTTPEndpoint(
fmt.Sprintf(
"%s:%d",
ctx.String(httpListenAddrFlag.Name),
ctx.Int(httpPortFlag.Name)),
l2Backend.APIs())
if err != nil {
log.Crit("Could not start RPC api", "error", err)
}
defer func() {
_ = handler.Shutdown(ctx.Context)
log.Info("HTTP endpoint closed", "url", fmt.Sprintf("http://%v/", addr))
}()
log.Info("HTTP endpoint opened", "url", fmt.Sprintf("http://%v/", addr))
}
log.Info("Start bridge successfully")
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
return nil
}
// Run run bridge cmd instance.
func Run() {
// Run the bridge.
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}


@@ -0,0 +1,19 @@
package app
import (
"fmt"
"testing"
"time"
"scroll-tech/common/cmd"
"scroll-tech/common/version"
)
func TestRunBridge(t *testing.T) {
bridge := cmd.NewCmd(t, "bridge-test", "--version")
defer bridge.WaitExit()
// wait result
bridge.ExpectWithTimeout(true, time.Second*3, fmt.Sprintf("bridge version %s", version.Version))
bridge.RunApp(nil)
}


@@ -1,6 +1,8 @@
package main
package app
import "github.com/urfave/cli/v2"
import (
"github.com/urfave/cli/v2"
)
var (
apiFlags = []cli.Flag{
@@ -26,22 +28,4 @@ var (
Usage: "HTTP-RPC server listening port",
Value: 8290,
}
l1Flags = []cli.Flag{
&l1UrlFlag,
}
l1UrlFlag = cli.StringFlag{
Name: "l1.endpoint",
Usage: "The endpoint connect to l1chain node",
Value: "https://goerli.infura.io/v3/9aa3d95b3bc440fa88ea12eaa4456161",
}
l2Flags = []cli.Flag{
&l2UrlFlag,
}
l2UrlFlag = cli.StringFlag{
Name: "l2.endpoint",
Usage: "The endpoint connect to l2chain node",
Value: "/var/lib/jenkins/workspace/SequencerPipeline/MyPrivateNetwork/geth.ipc",
}
)


@@ -1,125 +1,7 @@
package main
import (
"fmt"
"os"
"os/signal"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/database"
"scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/bridge/config"
"scroll-tech/bridge/l1"
"scroll-tech/bridge/l2"
)
import "scroll-tech/bridge/cmd/app"
func main() {
// Set up Bridge app info.
app := cli.NewApp()
app.Action = action
app.Name = "bridge"
app.Usage = "The Scroll Bridge"
app.Version = version.Version
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Flags = append(app.Flags, apiFlags...)
app.Flags = append(app.Flags, l1Flags...)
app.Flags = append(app.Flags, l2Flags...)
app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx)
}
// Run the sequencer.
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}
func applyConfig(ctx *cli.Context, cfg *config.Config) {
if ctx.IsSet(l1UrlFlag.Name) {
cfg.L1Config.Endpoint = ctx.String(l1UrlFlag.Name)
}
if ctx.IsSet(l2UrlFlag.Name) {
cfg.L2Config.Endpoint = ctx.String(l2UrlFlag.Name)
}
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
applyConfig(ctx, cfg)
// init db connection
var ormFactory database.OrmFactory
if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
log.Crit("failed to init db connection", "err", err)
}
var (
l1Backend *l1.Backend
l2Backend *l2.Backend
)
// @todo change nil to actual client after https://scroll-tech/bridge/pull/40 merged
l1Backend, err = l1.New(ctx.Context, cfg.L1Config, ormFactory)
if err != nil {
return err
}
l2Backend, err = l2.New(ctx.Context, cfg.L2Config, ormFactory)
if err != nil {
return err
}
defer func() {
l1Backend.Stop()
l2Backend.Stop()
err = ormFactory.Close()
if err != nil {
log.Error("can not close ormFactory", err)
}
}()
// Start all modules.
if err = l1Backend.Start(); err != nil {
log.Crit("couldn't start l1 backend", "error", err)
}
if err = l2Backend.Start(); err != nil {
log.Crit("couldn't start l2 backend", "error", err)
}
apis := l2Backend.APIs()
// Register api and start rpc service.
if ctx.Bool(httpEnabledFlag.Name) {
handler, addr, err := utils.StartHTTPEndpoint(
fmt.Sprintf(
"%s:%d",
ctx.String(httpListenAddrFlag.Name),
ctx.Int(httpPortFlag.Name)),
apis)
if err != nil {
log.Crit("Could not start HTTP api", "error", err)
}
defer func() {
_ = handler.Shutdown(ctx.Context)
log.Info("HTTP endpoint closed", "url", fmt.Sprintf("http://%v/", addr))
}()
log.Info("HTTP endpoint opened", "url", fmt.Sprintf("http://%v/", addr))
}
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
return nil
app.Run()
}


@@ -3,6 +3,7 @@
"confirmations": 6,
"endpoint": "https://goerli.infura.io/v3/9aa3d95b3bc440fa88ea12eaa4456161",
"l1_messenger_address": "0x0000000000000000000000000000000000000000",
"rollup_contract_address": "0x0000000000000000000000000000000000000000",
"start_height": 0,
"relayer_config": {
"messenger_contract_address": "0x0000000000000000000000000000000000000000",
@@ -50,6 +51,7 @@
"batch_proposer_config": {
"proof_generation_freq": 1,
"batch_gas_threshold": 3000000,
"batch_tx_num_threshold": 135,
"batch_time_sec": 300,
"batch_blocks_limit": 100,
"skipped_opcodes": [


@@ -5,8 +5,6 @@ import (
"os"
"path/filepath"
"scroll-tech/common/utils"
"scroll-tech/database"
)
@@ -30,9 +28,5 @@ func NewConfig(file string) (*Config, error) {
return nil, err
}
// cover value by env fields
cfg.DBConfig.DSN = utils.GetEnvWithDefault("DB_DSN", cfg.DBConfig.DSN)
cfg.DBConfig.DriverName = utils.GetEnvWithDefault("DB_DRIVER", cfg.DBConfig.DriverName)
return cfg, nil
}


@@ -11,7 +11,9 @@ type L1Config struct {
// The start height to sync event from layer 1
StartHeight uint64 `json:"start_height"`
// The messenger contract address deployed on layer 1 chain.
L1MessengerAddress common.Address `json:"l1_messenger_address,omitempty"`
L1MessengerAddress common.Address `json:"l1_messenger_address"`
// The rollup contract address deployed on layer 1 chain.
RollupContractAddress common.Address `json:"rollup_contract_address"`
// The relayer config
RelayerConfig *RelayerConfig `json:"relayer_config"`
}


@@ -24,6 +24,8 @@ type L2Config struct {
type BatchProposerConfig struct {
// Proof generation frequency, generating proof every k blocks
ProofGenerationFreq uint64 `json:"proof_generation_freq"`
// Txnum threshold in a batch
BatchTxNumThreshold uint64 `json:"batch_tx_num_threshold"`
// Gas threshold in a batch
BatchGasThreshold uint64 `json:"batch_gas_threshold"`
// Time waited to generate a batch even if gas_threshold not met


@@ -5,10 +5,11 @@ go 1.18
require (
github.com/iden3/go-iden3-crypto v0.0.13
github.com/orcaman/concurrent-map v1.0.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20221213034543-78c1f57fcfea
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81
github.com/stretchr/testify v1.8.0
github.com/urfave/cli/v2 v2.10.2
golang.org/x/sync v0.1.0
modernc.org/mathutil v1.4.1
)
require (
@@ -27,6 +28,7 @@ require (
github.com/mattn/go-isatty v0.0.14 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect
github.com/rjeczalik/notify v0.9.1 // indirect
github.com/rogpeppe/go-internal v1.8.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
@@ -36,8 +38,8 @@ require (
github.com/tklauser/numcpus v0.4.0 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/crypto v0.4.0 // indirect
golang.org/x/sys v0.3.0 // indirect
golang.org/x/crypto v0.5.0 // indirect
golang.org/x/sys v0.4.0 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)


@@ -336,6 +336,8 @@ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc=
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
@@ -348,8 +350,8 @@ github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221213034543-78c1f57fcfea h1:KYlmCH4cDMGxQzaYoSK8+DF53POGpAmnzusAtBWzEjA=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221213034543-78c1f57fcfea/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81 h1:Gm18RZ9WTR2Dupumr60E2m1Noe+l9/lITt6iRyxxZoc=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/zktrie v0.3.0/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
github.com/scroll-tech/zktrie v0.3.1 h1:HlR+fMBdjXX1/7cUMqpUgGEhGy/3vN1JpwQ0ovg/Ys8=
github.com/scroll-tech/zktrie v0.3.1/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
@@ -423,8 +425,8 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8=
golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80=
golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -539,8 +541,8 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -552,7 +554,7 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM=
golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -662,5 +664,7 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las=
modernc.org/mathutil v1.4.1 h1:ij3fYGe8zBF4Vu+g0oT7mB06r8sqGWKuJu1yXeR4by8=
modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=


@@ -31,7 +31,7 @@ func New(ctx context.Context, cfg *config.L1Config, orm database.OrmFactory) (*B
return nil, err
}
watcher := NewWatcher(ctx, client, cfg.StartHeight, cfg.Confirmations, cfg.L1MessengerAddress, cfg.RelayerConfig.RollupContractAddress, orm)
watcher := NewWatcher(ctx, client, cfg.StartHeight, cfg.Confirmations, cfg.L1MessengerAddress, cfg.RollupContractAddress, orm)
return &Backend{
cfg: cfg,

bridge/l1/l1_test.go (new file, 65 lines)

@@ -0,0 +1,65 @@
package l1
import (
"testing"
"github.com/stretchr/testify/assert"
"scroll-tech/common/docker"
"scroll-tech/bridge/config"
)
var (
// config
cfg *config.Config
// docker consider handler.
l1gethImg docker.ImgInstance
l2gethImg docker.ImgInstance
dbImg docker.ImgInstance
)
func setupEnv(t *testing.T) {
// Load config.
var err error
cfg, err = config.NewConfig("../config.json")
assert.NoError(t, err)
// Create l1geth container.
l1gethImg = docker.NewTestL1Docker(t)
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = l1gethImg.Endpoint()
cfg.L1Config.Endpoint = l1gethImg.Endpoint()
// Create l2geth container.
l2gethImg = docker.NewTestL2Docker(t)
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = l2gethImg.Endpoint()
cfg.L2Config.Endpoint = l2gethImg.Endpoint()
// Create db container.
dbImg = docker.NewTestDBDocker(t, cfg.DBConfig.DriverName)
cfg.DBConfig.DSN = dbImg.Endpoint()
}
func free(t *testing.T) {
if dbImg != nil {
assert.NoError(t, dbImg.Stop())
}
if l1gethImg != nil {
assert.NoError(t, l1gethImg.Stop())
}
if l2gethImg != nil {
assert.NoError(t, l2gethImg.Stop())
}
}
func TestL1(t *testing.T) {
setupEnv(t)
t.Run("testCreateNewL1Relayer", testCreateNewL1Relayer)
t.Run("testStartWatcher", testStartWatcher)
t.Cleanup(func() {
free(t)
})
}


@@ -10,6 +10,7 @@ import (
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
@@ -51,7 +52,8 @@ func NewLayer1Relayer(ctx context.Context, ethClient *ethclient.Client, l1Confir
sender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKeys)
if err != nil {
log.Error("new sender failed", "err", err)
addr := crypto.PubkeyToAddress(cfg.MessageSenderPrivateKeys[0].PublicKey)
log.Error("new sender failed", "main address", addr.String(), "err", err)
return nil, err
}
@@ -70,15 +72,20 @@ func NewLayer1Relayer(ctx context.Context, ethClient *ethclient.Client, l1Confir
// ProcessSavedEvents relays saved un-processed cross-domain transactions to desired blockchain
func (r *Layer1Relayer) ProcessSavedEvents() {
// msgs are sorted by nonce in increasing order
msgs, err := r.db.GetL1MessagesByStatus(orm.MsgPending)
msgs, err := r.db.GetL1MessagesByStatus(orm.MsgPending, 100)
if err != nil {
log.Error("Failed to fetch unprocessed L1 messages", "err", err)
return
}
if len(msgs) > 0 {
log.Info("Processing L1 messages", "count", len(msgs))
}
for _, msg := range msgs {
if err = r.processSavedEvent(msg); err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
log.Error("failed to process event", "err", err)
log.Error("failed to process event", "msg.msgHash", msg.MsgHash, "err", err)
}
return
}
@@ -106,7 +113,13 @@ func (r *Layer1Relayer) processSavedEvent(msg *orm.L1Message) error {
return err
}
hash, err := r.sender.SendTransaction(msg.MsgHash, &r.cfg.MessengerContractAddress, big.NewInt(0), data)
hash, err := r.sender.SendTransaction(msg.MsgHash, &r.cfg.MessengerContractAddress, big.NewInt(0), data, 0)
if err != nil && err.Error() == "execution reverted: Message expired" {
return r.db.UpdateLayer1Status(r.ctx, msg.MsgHash, orm.MsgExpired)
}
if err != nil && err.Error() == "execution reverted: Message successfully executed" {
return r.db.UpdateLayer1Status(r.ctx, msg.MsgHash, orm.MsgConfirmed)
}
if err != nil {
return err
}
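
The relayer now inspects the revert reason returned by SendTransaction and maps two known reasons to terminal statuses (MsgExpired, MsgConfirmed) instead of retrying. A standalone sketch of that mapping, with status names assumed for illustration only:

package main

import "fmt"

type msgStatus int

const (
	msgPending msgStatus = iota
	msgConfirmed
	msgExpired
)

// statusForRevert maps known revert reasons to a terminal status; ok is false
// when the error should be treated as retryable instead. err must be non-nil.
func statusForRevert(err error) (status msgStatus, ok bool) {
	switch err.Error() {
	case "execution reverted: Message expired":
		return msgExpired, true
	case "execution reverted: Message successfully executed":
		return msgConfirmed, true
	default:
		return msgPending, false
	}
}

func main() {
	s, ok := statusForRevert(fmt.Errorf("execution reverted: Message expired"))
	fmt.Println(s, ok) // 2 true
}
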
@@ -121,6 +134,8 @@ func (r *Layer1Relayer) processSavedEvent(msg *orm.L1Message) error {
// Start the relayer process
func (r *Layer1Relayer) Start() {
log.Info("Starting l1/relayer")
go func() {
// trigger by timer
ticker := time.NewTicker(3 * time.Second)


@@ -1,4 +1,4 @@
package l1_test
package l1
import (
"context"
@@ -9,37 +9,21 @@ import (
"scroll-tech/database/migrate"
"scroll-tech/bridge/config"
"scroll-tech/bridge/l1"
"scroll-tech/database"
"scroll-tech/common/docker"
)
// TestCreateNewRelayer test create new relayer instance and stop
func TestCreateNewL1Relayer(t *testing.T) {
cfg, err := config.NewConfig("../config.json")
assert.NoError(t, err)
l1docker := docker.NewTestL1Docker(t)
defer l1docker.Stop()
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = l1docker.Endpoint()
cfg.L1Config.Endpoint = l1docker.Endpoint()
client, err := ethclient.Dial(l1docker.Endpoint())
assert.NoError(t, err)
dbImg := docker.NewTestDBDocker(t, cfg.DBConfig.DriverName)
defer dbImg.Stop()
cfg.DBConfig.DSN = dbImg.Endpoint()
// testCreateNewRelayer test create new relayer instance and stop
func testCreateNewL1Relayer(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
relayer, err := l1.NewLayer1Relayer(context.Background(), client, 1, db, cfg.L2Config.RelayerConfig)
client, err := ethclient.Dial(l1gethImg.Endpoint())
assert.NoError(t, err)
relayer, err := NewLayer1Relayer(context.Background(), client, 1, db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
defer relayer.Stop()


@@ -5,7 +5,7 @@ import (
"math/big"
"time"
"github.com/scroll-tech/go-ethereum"
geth "github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
@@ -81,23 +81,26 @@ func NewWatcher(ctx context.Context, client *ethclient.Client, startHeight uint6
// Start the Watcher module.
func (w *Watcher) Start() {
log.Info("Starting l1/watcher")
go func() {
// trigger by timer
ticker := time.NewTicker(10 * time.Second)
defer ticker.Stop()
for {
for ; true; <-ticker.C {
select {
case <-ticker.C:
case <-w.stop:
return
default:
blockNumber, err := w.client.BlockNumber(w.ctx)
if err != nil {
log.Error("Failed to get block number", "err", err)
continue
}
if err := w.fetchContractEvent(blockNumber); err != nil {
if err := w.FetchContractEvent(blockNumber); err != nil {
log.Error("Failed to fetch bridge contract", "err", err)
}
case <-w.stop:
return
}
}
}()
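
The `for ; true; <-ticker.C` loop above is the "run once immediately, then on every tick" idiom: the first iteration executes right away, and each later iteration blocks on the ticker while still honoring the stop channel. A self-contained sketch of the same pattern with illustrative names:

package main

import (
	"fmt"
	"time"
)

// pollLoop runs work immediately, then repeats every interval until stop is closed.
func pollLoop(stop <-chan struct{}, interval time.Duration, work func()) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	// Empty init and a `<-ticker.C` post-statement: the body runs once
	// immediately, then once per tick.
	for ; true; <-ticker.C {
		select {
		case <-stop:
			return
		default:
			work()
		}
	}
}

func main() {
	stop := make(chan struct{})
	time.AfterFunc(350*time.Millisecond, func() { close(stop) })
	pollLoop(stop, 100*time.Millisecond, func() { fmt.Println("tick") })
}
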
@@ -111,105 +114,112 @@ func (w *Watcher) Stop() {
const contractEventsBlocksFetchLimit = int64(10)
// FetchContractEvent pull latest event logs from given contract address and save in DB
func (w *Watcher) fetchContractEvent(blockHeight uint64) error {
func (w *Watcher) FetchContractEvent(blockHeight uint64) error {
defer func() {
log.Info("l1 watcher fetchContractEvent", "w.processedMsgHeight", w.processedMsgHeight)
}()
fromBlock := int64(w.processedMsgHeight) + 1
toBlock := int64(blockHeight) - int64(w.confirmations)
if toBlock < fromBlock {
return nil
}
for from := fromBlock; from <= toBlock; from += contractEventsBlocksFetchLimit {
to := from + contractEventsBlocksFetchLimit - 1
if toBlock > fromBlock+contractEventsBlocksFetchLimit {
toBlock = fromBlock + contractEventsBlocksFetchLimit - 1
}
if to > toBlock {
to = toBlock
}
// warning: uint int conversion...
query := ethereum.FilterQuery{
FromBlock: big.NewInt(fromBlock), // inclusive
ToBlock: big.NewInt(toBlock), // inclusive
Addresses: []common.Address{
w.messengerAddress,
w.rollupAddress,
},
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 5)
query.Topics[0][0] = common.HexToHash(bridge_abi.SENT_MESSAGE_EVENT_SIGNATURE)
query.Topics[0][1] = common.HexToHash(bridge_abi.RELAYED_MESSAGE_EVENT_SIGNATURE)
query.Topics[0][2] = common.HexToHash(bridge_abi.FAILED_RELAYED_MESSAGE_EVENT_SIGNATURE)
query.Topics[0][3] = common.HexToHash(bridge_abi.COMMIT_BATCH_EVENT_SIGNATURE)
query.Topics[0][4] = common.HexToHash(bridge_abi.FINALIZED_BATCH_EVENT_SIGNATURE)
// warning: uint int conversion...
query := geth.FilterQuery{
FromBlock: big.NewInt(from), // inclusive
ToBlock: big.NewInt(to), // inclusive
Addresses: []common.Address{
w.messengerAddress,
w.rollupAddress,
},
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 5)
query.Topics[0][0] = common.HexToHash(bridge_abi.SentMessageEventSignature)
query.Topics[0][1] = common.HexToHash(bridge_abi.RelayedMessageEventSignature)
query.Topics[0][2] = common.HexToHash(bridge_abi.FailedRelayedMessageEventSignature)
query.Topics[0][3] = common.HexToHash(bridge_abi.CommitBatchEventSignature)
query.Topics[0][4] = common.HexToHash(bridge_abi.FinalizedBatchEventSignature)
logs, err := w.client.FilterLogs(w.ctx, query)
if err != nil {
log.Warn("Failed to get event logs", "err", err)
return err
}
if len(logs) == 0 {
w.processedMsgHeight = uint64(toBlock)
return nil
}
log.Info("Received new L1 messages", "fromBlock", fromBlock, "toBlock", toBlock,
"cnt", len(logs))
logs, err := w.client.FilterLogs(w.ctx, query)
if err != nil {
log.Warn("Failed to get event logs", "err", err)
return err
}
if len(logs) == 0 {
w.processedMsgHeight = uint64(to)
continue
}
log.Info("Received new L1 events", "fromBlock", from, "toBlock", to, "cnt", len(logs))
sentMessageEvents, relayedMessageEvents, rollupEvents, err := w.parseBridgeEventLogs(logs)
if err != nil {
log.Error("Failed to parse emitted events log", "err", err)
return err
}
sentMessageEvents, relayedMessageEvents, rollupEvents, err := w.parseBridgeEventLogs(logs)
if err != nil {
log.Error("Failed to parse emitted events log", "err", err)
return err
}
log.Info("L1 events types", "SentMessageCount", len(sentMessageEvents), "RelayedMessageCount", len(relayedMessageEvents), "RollupEventCount", len(rollupEvents))
// use rollup event to update rollup results db status
var batchIDs []string
for _, event := range rollupEvents {
batchIDs = append(batchIDs, event.batchID.String())
}
statuses, err := w.db.GetRollupStatusByIDList(batchIDs)
if err != nil {
log.Error("Failed to GetRollupStatusByIDList", "err", err)
return err
}
if len(statuses) != len(batchIDs) {
log.Error("RollupStatus.Length mismatch with BatchIDs.Length")
return nil
}
// use rollup event to update rollup results db status
var batchIDs []string
for _, event := range rollupEvents {
batchIDs = append(batchIDs, event.batchID.String())
}
statuses, err := w.db.GetRollupStatusByIDList(batchIDs)
if err != nil {
log.Error("Failed to GetRollupStatusByIDList", "err", err)
return err
}
if len(statuses) != len(batchIDs) {
log.Error("RollupStatus.Length mismatch with BatchIDs.Length", "RollupStatus.Length", len(statuses), "BatchIDs.Length", len(batchIDs))
return nil
}
for index, event := range rollupEvents {
batchID := event.batchID.String()
status := statuses[index]
if event.status != status {
if event.status == orm.RollupFinalized {
err = w.db.UpdateFinalizeTxHashAndRollupStatus(w.ctx, batchID, event.txHash.String(), event.status)
} else if event.status == orm.RollupCommitted {
err = w.db.UpdateCommitTxHashAndRollupStatus(w.ctx, batchID, event.txHash.String(), event.status)
for index, event := range rollupEvents {
batchID := event.batchID.String()
status := statuses[index]
// only update when db status is before event status
if event.status > status {
if event.status == orm.RollupFinalized {
err = w.db.UpdateFinalizeTxHashAndRollupStatus(w.ctx, batchID, event.txHash.String(), event.status)
} else if event.status == orm.RollupCommitted {
err = w.db.UpdateCommitTxHashAndRollupStatus(w.ctx, batchID, event.txHash.String(), event.status)
}
if err != nil {
log.Error("Failed to update Rollup/Finalize TxHash and Status", "err", err)
return err
}
}
}
// Update relayed message first to make sure we don't forget to update submitted message.
// Since, we always start sync from the latest unprocessed message.
for _, msg := range relayedMessageEvents {
if msg.isSuccessful {
// succeed
err = w.db.UpdateLayer2StatusAndLayer1Hash(w.ctx, msg.msgHash.String(), orm.MsgConfirmed, msg.txHash.String())
} else {
// failed
err = w.db.UpdateLayer2StatusAndLayer1Hash(w.ctx, msg.msgHash.String(), orm.MsgFailed, msg.txHash.String())
}
if err != nil {
log.Error("Failed to update Rollup/Finalize TxHash and Status", "err", err)
log.Error("Failed to update layer1 status and layer2 hash", "err", err)
return err
}
}
}
// Update relayed message first to make sure we don't forget to update submitted message.
// Since, we always start sync from the latest unprocessed message.
for _, msg := range relayedMessageEvents {
if msg.isSuccessful {
// succeed
err = w.db.UpdateLayer1StatusAndLayer2Hash(w.ctx, msg.msgHash.String(), orm.MsgConfirmed, msg.txHash.String())
} else {
// failed
err = w.db.UpdateLayer1StatusAndLayer2Hash(w.ctx, msg.msgHash.String(), orm.MsgFailed, msg.txHash.String())
}
if err != nil {
log.Error("Failed to update layer1 status and layer2 hash", "err", err)
if err = w.db.SaveL1Messages(w.ctx, sentMessageEvents); err != nil {
return err
}
w.processedMsgHeight = uint64(to)
}
err = w.db.SaveL1Messages(w.ctx, sentMessageEvents)
if err == nil {
w.processedMsgHeight = uint64(toBlock)
}
return err
return nil
}
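
The reworked FetchContractEvent walks the confirmed block range in windows of contractEventsBlocksFetchLimit blocks, clamps the final window, and only advances processedMsgHeight after a window has been fully processed. A small sketch of just the windowing arithmetic (illustrative, not from the diff):

package main

import "fmt"

// chunks splits the inclusive range [from, to] into windows of at most limit blocks.
func chunks(from, to, limit int64) [][2]int64 {
	var out [][2]int64
	for start := from; start <= to; start += limit {
		end := start + limit - 1
		if end > to {
			end = to // clamp the last window to the confirmed head
		}
		out = append(out, [2]int64{start, end})
	}
	return out
}

func main() {
	// e.g. blocks 101..125 with a limit of 10 -> [[101 110] [111 120] [121 125]]
	fmt.Println(chunks(101, 125, 10))
}
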
func (w *Watcher) parseBridgeEventLogs(logs []types.Log) ([]*orm.L1Message, []relayedMessage, []rollupEvent, error) {
@@ -221,7 +231,7 @@ func (w *Watcher) parseBridgeEventLogs(logs []types.Log) ([]*orm.L1Message, []re
var rollupEvents []rollupEvent
for _, vLog := range logs {
switch vLog.Topics[0] {
case common.HexToHash(bridge_abi.SENT_MESSAGE_EVENT_SIGNATURE):
case common.HexToHash(bridge_abi.SentMessageEventSignature):
event := struct {
Target common.Address
Sender common.Address
@@ -242,7 +252,7 @@ func (w *Watcher) parseBridgeEventLogs(logs []types.Log) ([]*orm.L1Message, []re
event.Target = common.HexToAddress(vLog.Topics[1].String())
l1Messages = append(l1Messages, &orm.L1Message{
Nonce: event.MessageNonce.Uint64(),
MsgHash: utils.ComputeMessageHash(event.Target, event.Sender, event.Value, event.Fee, event.Deadline, event.Message, event.MessageNonce).String(),
MsgHash: utils.ComputeMessageHash(event.Sender, event.Target, event.Value, event.Fee, event.Deadline, event.Message, event.MessageNonce).String(),
Height: vLog.BlockNumber,
Sender: event.Sender.String(),
Value: event.Value.String(),
@@ -253,7 +263,7 @@ func (w *Watcher) parseBridgeEventLogs(logs []types.Log) ([]*orm.L1Message, []re
Calldata: common.Bytes2Hex(event.Message),
Layer1Hash: vLog.TxHash.Hex(),
})
case common.HexToHash(bridge_abi.RELAYED_MESSAGE_EVENT_SIGNATURE):
case common.HexToHash(bridge_abi.RelayedMessageEventSignature):
event := struct {
MsgHash common.Hash
}{}
@@ -264,7 +274,7 @@ func (w *Watcher) parseBridgeEventLogs(logs []types.Log) ([]*orm.L1Message, []re
txHash: vLog.TxHash,
isSuccessful: true,
})
case common.HexToHash(bridge_abi.FAILED_RELAYED_MESSAGE_EVENT_SIGNATURE):
case common.HexToHash(bridge_abi.FailedRelayedMessageEventSignature):
event := struct {
MsgHash common.Hash
}{}
@@ -275,7 +285,7 @@ func (w *Watcher) parseBridgeEventLogs(logs []types.Log) ([]*orm.L1Message, []re
txHash: vLog.TxHash,
isSuccessful: false,
})
case common.HexToHash(bridge_abi.COMMIT_BATCH_EVENT_SIGNATURE):
case common.HexToHash(bridge_abi.CommitBatchEventSignature):
event := struct {
BatchID common.Hash
BatchHash common.Hash
@@ -295,7 +305,7 @@ func (w *Watcher) parseBridgeEventLogs(logs []types.Log) ([]*orm.L1Message, []re
txHash: vLog.TxHash,
status: orm.RollupCommitted,
})
case common.HexToHash(bridge_abi.FINALIZED_BATCH_EVENT_SIGNATURE):
case common.HexToHash(bridge_abi.FinalizedBatchEventSignature):
event := struct {
BatchID common.Hash
BatchHash common.Hash

bridge/l1/watcher_test.go (new file, 29 lines)

@@ -0,0 +1,29 @@
package l1
import (
"context"
"testing"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/stretchr/testify/assert"
"scroll-tech/database"
"scroll-tech/database/migrate"
)
func testStartWatcher(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
client, err := ethclient.Dial(l1gethImg.Endpoint())
assert.NoError(t, err)
l1Cfg := cfg.L1Config
watcher := NewWatcher(context.Background(), client, l1Cfg.StartHeight, l1Cfg.Confirmations, l1Cfg.L1MessengerAddress, l1Cfg.RelayerConfig.RollupContractAddress, db)
watcher.Start()
defer watcher.Stop()
}


@@ -3,7 +3,6 @@ package l2
import (
"context"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/rpc"
@@ -28,7 +27,7 @@ func New(ctx context.Context, cfg *config.L2Config, orm database.OrmFactory) (*B
return nil, err
}
relayer, err := NewLayer2Relayer(ctx, client, int64(cfg.Confirmations), orm, cfg.RelayerConfig)
relayer, err := NewLayer2Relayer(ctx, orm, cfg.RelayerConfig)
if err != nil {
return nil, err
}
@@ -67,8 +66,3 @@ func (l2 *Backend) APIs() []rpc.API {
},
}
}
// MockBlockTrace for test case
func (l2 *Backend) MockBlockTrace(blockTrace *types.BlockTrace) {
l2.l2Watcher.Send(blockTrace)
}


@@ -18,9 +18,10 @@ type batchProposer struct {
orm database.OrmFactory
batchTimeSec uint64
batchGasThreshold uint64
batchBlocksLimit uint64
batchTimeSec uint64
batchGasThreshold uint64
batchTxNumThreshold uint64
batchBlocksLimit uint64
proofGenerationFreq uint64
skippedOpcodes map[string]struct{}
@@ -32,13 +33,14 @@ func newBatchProposer(cfg *config.BatchProposerConfig, orm database.OrmFactory)
orm: orm,
batchTimeSec: cfg.BatchTimeSec,
batchGasThreshold: cfg.BatchGasThreshold,
batchTxNumThreshold: cfg.BatchTxNumThreshold,
batchBlocksLimit: cfg.BatchBlocksLimit,
proofGenerationFreq: cfg.ProofGenerationFreq,
skippedOpcodes: cfg.SkippedOpcodes,
}
}
func (w *batchProposer) tryProposeBatch() error {
func (w *batchProposer) tryProposeBatch() {
w.mutex.Lock()
defer w.mutex.Unlock()
@@ -47,38 +49,53 @@ func (w *batchProposer) tryProposeBatch() error {
fmt.Sprintf("order by number ASC LIMIT %d", w.batchBlocksLimit),
)
if err != nil {
return err
log.Error("failed to get unbatched blocks", "err", err)
return
}
if len(blocks) == 0 {
return nil
return
}
if blocks[0].GasUsed > w.batchGasThreshold {
log.Warn("gas overflow even for only 1 block", "height", blocks[0].Number, "gas", blocks[0].GasUsed)
return w.createBatchForBlocks(blocks[:1])
if err = w.createBatchForBlocks(blocks[:1]); err != nil {
log.Error("failed to create batch", "number", blocks[0].Number, "err", err)
}
return
}
if blocks[0].TxNum > w.batchTxNumThreshold {
log.Warn("too many txs even for only 1 block", "height", blocks[0].Number, "tx_num", blocks[0].TxNum)
if err = w.createBatchForBlocks(blocks[:1]); err != nil {
log.Error("failed to create batch", "number", blocks[0].Number, "err", err)
}
return
}
var (
length = len(blocks)
gasUsed uint64
length = len(blocks)
gasUsed, txNum uint64
)
// add blocks into batch until reach batchGasThreshold
for i, block := range blocks {
if gasUsed+block.GasUsed > w.batchGasThreshold {
if (gasUsed+block.GasUsed > w.batchGasThreshold) || (txNum+block.TxNum > w.batchTxNumThreshold) {
blocks = blocks[:i]
break
}
gasUsed += block.GasUsed
txNum += block.TxNum
}
// if too few gas gathered, but we don't want to halt, we then check the first block in the batch:
// if it's not old enough we will skip proposing the batch,
// otherwise we will still propose a batch
if length == len(blocks) && blocks[0].BlockTimestamp+w.batchTimeSec > uint64(time.Now().Unix()) {
return nil
return
}
return w.createBatchForBlocks(blocks)
if err = w.createBatchForBlocks(blocks); err != nil {
log.Error("failed to create batch", "from", blocks[0].Number, "to", blocks[len(blocks)-1].Number, "err", err)
}
}
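
tryProposeBatch now accumulates both gas and transaction counts and cuts the batch before either batchGasThreshold or batchTxNumThreshold would be exceeded, with a single-block escape hatch for oversized blocks. A minimal sketch of that selection loop under assumed field names:

package main

import "fmt"

type blockInfo struct {
	GasUsed uint64
	TxNum   uint64
}

// packBlocks returns the longest prefix of blocks that stays within both thresholds.
func packBlocks(blocks []blockInfo, gasThreshold, txNumThreshold uint64) []blockInfo {
	var gasUsed, txNum uint64
	for i, b := range blocks {
		if gasUsed+b.GasUsed > gasThreshold || txNum+b.TxNum > txNumThreshold {
			return blocks[:i]
		}
		gasUsed += b.GasUsed
		txNum += b.TxNum
	}
	return blocks
}

func main() {
	blocks := []blockInfo{
		{GasUsed: 1_200_000, TxNum: 50},
		{GasUsed: 1_500_000, TxNum: 60},
		{GasUsed: 900_000, TxNum: 40},
	}
	// 2: the third block would push gas past the 3M threshold.
	fmt.Println(len(packBlocks(blocks, 3_000_000, 135)))
}
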
func (w *batchProposer) createBatchForBlocks(blocks []*orm.BlockInfo) error {


@@ -0,0 +1,62 @@
package l2
import (
"encoding/json"
"fmt"
"math/big"
"os"
"testing"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert"
"scroll-tech/database"
"scroll-tech/database/migrate"
"scroll-tech/bridge/config"
"scroll-tech/common/utils"
)
func testBatchProposer(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
trace2 := &types.BlockTrace{}
trace3 := &types.BlockTrace{}
data, err := os.ReadFile("../../common/testdata/blockTrace_02.json")
assert.NoError(t, err)
err = json.Unmarshal(data, trace2)
assert.NoError(t, err)
data, err = os.ReadFile("../../common/testdata/blockTrace_03.json")
assert.NoError(t, err)
err = json.Unmarshal(data, trace3)
assert.NoError(t, err)
// Insert traces into db.
assert.NoError(t, db.InsertBlockTraces([]*types.BlockTrace{trace2, trace3}))
id := utils.ComputeBatchID(trace3.Header.Hash(), trace2.Header.ParentHash, big.NewInt(1))
proposer := newBatchProposer(&config.BatchProposerConfig{
ProofGenerationFreq: 1,
BatchGasThreshold: 3000000,
BatchTxNumThreshold: 135,
BatchTimeSec: 1,
BatchBlocksLimit: 100,
}, db)
proposer.tryProposeBatch()
infos, err := db.GetUnbatchedBlocks(map[string]interface{}{},
fmt.Sprintf("order by number ASC LIMIT %d", 100))
assert.NoError(t, err)
assert.Equal(t, true, len(infos) == 0)
exist, err := db.BatchRecordExist(id)
assert.NoError(t, err)
assert.Equal(t, true, exist)
}


@@ -1,4 +1,4 @@
package l2_test
package l2
import (
"testing"
@@ -77,6 +77,9 @@ func TestFunction(t *testing.T) {
t.Run("TestL2RelayerProcessSaveEvents", testL2RelayerProcessSaveEvents)
t.Run("testL2RelayerProcessPendingBatches", testL2RelayerProcessPendingBatches)
t.Run("testL2RelayerProcessCommittedBatches", testL2RelayerProcessCommittedBatches)
t.Run("testL2RelayerSkipBatches", testL2RelayerSkipBatches)
t.Run("testBatchProposer", testBatchProposer)
t.Cleanup(func() {
free(t)

View File

@@ -3,15 +3,19 @@ package l2
import (
"context"
"errors"
"fmt"
"math/big"
"runtime"
"sync"
"time"
// not sure if this will cause problems when relaying with l1geth
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"golang.org/x/sync/errgroup"
"modernc.org/mathutil"
"scroll-tech/database"
"scroll-tech/database/orm"
@@ -29,8 +33,7 @@ import (
// Actions are triggered by new head from layer 1 geth node.
// @todo It's better to be triggered by watcher.
type Layer2Relayer struct {
ctx context.Context
client *ethclient.Client
ctx context.Context
db database.OrmFactory
cfg *config.RelayerConfig
@@ -43,20 +46,23 @@ type Layer2Relayer struct {
rollupCh <-chan *sender.Confirmation
l1RollupABI *abi.ABI
// a list of processing message, indexed by layer2 hash
processingMessage map[string]string
// A list of messages currently being processed.
// key(string): confirmation ID, value(string): layer2 hash.
processingMessage sync.Map
// a list of processing batch commitment, indexed by batch id
processingCommitment map[string]string
// A list of batch commitments currently being processed.
// key(string): confirmation ID, value(string): batch id.
processingCommitment sync.Map
// a list of processing batch finalization, indexed by batch id
processingFinalization map[string]string
// A list of batch finalizations currently being processed.
// key(string): confirmation ID, value(string): batch id.
processingFinalization sync.Map
stopCh chan struct{}
}
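Because the three Process* methods now run in separate goroutines on every tick, the plain maps are replaced with sync.Map. A minimal, self-contained sketch of the Store/Load/Delete pattern the relayer relies on (the IDs and hash below are placeholders for illustration only):

package main

import (
	"fmt"
	"sync"
)

func main() {
	var processing sync.Map

	// Store: confirmation ID -> layer2 msg hash (both plain strings here).
	processing.Store("tx-id-1", "0xabc")

	// Load returns an interface{}, so readers need a type assertion,
	// just like handleConfirmation does with msgHash.(string).
	if v, ok := processing.Load("tx-id-1"); ok {
		fmt.Println("confirmed message:", v.(string))
		processing.Delete("tx-id-1")
	}
}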
// NewLayer2Relayer will return a new instance of Layer2Relayer
func NewLayer2Relayer(ctx context.Context, ethClient *ethclient.Client, l2ConfirmNum int64, db database.OrmFactory, cfg *config.RelayerConfig) (*Layer2Relayer, error) {
func NewLayer2Relayer(ctx context.Context, db database.OrmFactory, cfg *config.RelayerConfig) (*Layer2Relayer, error) {
// @todo use different sender for relayer, block commit and proof finalize
messageSender, err := sender.NewSender(ctx, cfg.SenderConfig, cfg.MessageSenderPrivateKeys)
if err != nil {
@@ -72,7 +78,6 @@ func NewLayer2Relayer(ctx context.Context, ethClient *ethclient.Client, l2Confir
return &Layer2Relayer{
ctx: ctx,
client: ethClient,
db: db,
messageSender: messageSender,
messageCh: messageSender.ConfirmChan(),
@@ -81,23 +86,50 @@ func NewLayer2Relayer(ctx context.Context, ethClient *ethclient.Client, l2Confir
rollupCh: rollupSender.ConfirmChan(),
l1RollupABI: bridge_abi.RollupMetaABI,
cfg: cfg,
processingMessage: map[string]string{},
processingCommitment: map[string]string{},
processingFinalization: map[string]string{},
processingMessage: sync.Map{},
processingCommitment: sync.Map{},
processingFinalization: sync.Map{},
stopCh: make(chan struct{}),
}, nil
}
const processMsgLimit = 100
// ProcessSavedEvents relays saved, unprocessed cross-domain transactions to the destination chain
func (r *Layer2Relayer) ProcessSavedEvents() {
func (r *Layer2Relayer) ProcessSavedEvents(wg *sync.WaitGroup) {
defer wg.Done()
batch, err := r.db.GetLatestFinalizedBatch()
if err != nil {
log.Error("GetLatestFinalizedBatch failed", "err", err)
return
}
// msgs are sorted by nonce in increasing order
msgs, err := r.db.GetL2MessagesByStatus(orm.MsgPending)
msgs, err := r.db.GetL2Messages(
map[string]interface{}{"status": orm.MsgPending},
fmt.Sprintf("AND height<=%d", batch.EndBlockNumber),
fmt.Sprintf("ORDER BY nonce ASC LIMIT %d", processMsgLimit),
)
if err != nil {
log.Error("Failed to fetch unprocessed L2 messages", "err", err)
return
}
for _, msg := range msgs {
if err := r.processSavedEvent(msg); err != nil {
// process messages in batches
batchSize := mathutil.Min((runtime.GOMAXPROCS(0)+1)/2, r.messageSender.NumberOfAccounts())
for size := 0; len(msgs) > 0; msgs = msgs[size:] {
if size = len(msgs); size > batchSize {
size = batchSize
}
var g errgroup.Group
for _, msg := range msgs[:size] {
msg := msg
g.Go(func() error {
return r.processSavedEvent(msg, batch.Index)
})
}
if err := g.Wait(); err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
log.Error("failed to process l2 saved event", "err", err)
}
@@ -106,25 +138,13 @@ func (r *Layer2Relayer) ProcessSavedEvents() {
}
}
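The reworked ProcessSavedEvents relays messages in bounded-size groups sized from the number of sender accounts. A standalone sketch of the same chunk-then-errgroup pattern (the item type and work function are placeholders, not the relayer's types):

package main

import (
	"fmt"

	"golang.org/x/sync/errgroup"
)

func process(item int) error {
	fmt.Println("processing", item)
	return nil
}

func main() {
	items := []int{1, 2, 3, 4, 5, 6, 7}
	batchSize := 3

	// Walk the slice in chunks of batchSize; each chunk runs concurrently,
	// and the next chunk only starts after the whole chunk has finished.
	for size := 0; len(items) > 0; items = items[size:] {
		if size = len(items); size > batchSize {
			size = batchSize
		}
		var g errgroup.Group
		for _, item := range items[:size] {
			item := item // capture the loop variable for the goroutine
			g.Go(func() error { return process(item) })
		}
		if err := g.Wait(); err != nil {
			fmt.Println("chunk failed:", err)
			return
		}
	}
}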
func (r *Layer2Relayer) processSavedEvent(msg *orm.L2Message) error {
// @todo add support to relay multiple messages
batch, err := r.db.GetLatestFinalizedBatch()
if err != nil {
log.Error("GetLatestFinalizedBatch failed", "err", err)
return err
}
if batch.EndBlockNumber < msg.Height {
// log.Warn("corresponding block not finalized", "status", status)
return nil
}
func (r *Layer2Relayer) processSavedEvent(msg *orm.L2Message, index uint64) error {
// @todo fetch merkle proof from l2geth
log.Info("Processing L2 Message", "msg.nonce", msg.Nonce, "msg.height", msg.Height)
proof := bridge_abi.IL1ScrollMessengerL2MessageProof{
BlockHeight: big.NewInt(int64(msg.Height)),
BatchIndex: big.NewInt(int64(batch.Index)),
BatchIndex: big.NewInt(0).SetUint64(index),
MerkleProof: make([]byte, 0),
}
from := common.HexToAddress(msg.Sender)
@@ -146,7 +166,13 @@ func (r *Layer2Relayer) processSavedEvent(msg *orm.L2Message) error {
return err
}
hash, err := r.messageSender.SendTransaction(msg.MsgHash, &r.cfg.MessengerContractAddress, big.NewInt(0), data)
hash, err := r.messageSender.SendTransaction(msg.MsgHash, &r.cfg.MessengerContractAddress, big.NewInt(0), data, 0)
if err != nil && err.Error() == "execution reverted: Message expired" {
return r.db.UpdateLayer2Status(r.ctx, msg.MsgHash, orm.MsgExpired)
}
if err != nil && err.Error() == "execution reverted: Message successfully executed" {
return r.db.UpdateLayer2Status(r.ctx, msg.MsgHash, orm.MsgConfirmed)
}
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
log.Error("Failed to send relayMessageWithProof tx to layer1 ", "msg.height", msg.Height, "msg.MsgHash", msg.MsgHash, "err", err)
@@ -162,14 +188,15 @@ func (r *Layer2Relayer) processSavedEvent(msg *orm.L2Message) error {
log.Error("UpdateLayer2StatusAndLayer1Hash failed", "msgHash", msg.MsgHash, "err", err)
return err
}
r.processingMessage[msg.MsgHash] = msg.MsgHash
r.processingMessage.Store(msg.MsgHash, msg.MsgHash)
return nil
}
// ProcessPendingBatches submits batch data to the layer 1 rollup contract
func (r *Layer2Relayer) ProcessPendingBatches() {
func (r *Layer2Relayer) ProcessPendingBatches(wg *sync.WaitGroup) {
defer wg.Done()
// batches are sorted by batch index in increasing order
batchesInDB, err := r.db.GetPendingBatches()
batchesInDB, err := r.db.GetPendingBatches(1)
if err != nil {
log.Error("Failed to fetch pending L2 batches", "err", err)
return
@@ -240,27 +267,58 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
return
}
hash, err := r.rollupSender.SendTransaction(id, &r.cfg.RollupContractAddress, big.NewInt(0), data)
txID := id + "-commit"
// add suffix `-commit` to avoid ID collisions with the finalize tx in unit tests
hash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), data, 0)
if err != nil && err.Error() == "execution reverted: Parent batch hasn't been committed" {
// check whether the parent batch is being committed
batches, err = r.db.GetBlockBatches(map[string]interface{}{"end_block_hash": batch.ParentHash})
if err != nil || len(batches) == 0 {
log.Error("Failed to get parent batch from db", "batch_id", id, "parent_hash", batch.ParentHash, "err", err)
return
}
parentBatch := batches[0]
if parentBatch.RollupStatus >= orm.RollupCommitting {
// retry with manual gas estimation
gasLimit := estimateCommitBatchGas(len(data), len(layer2Batch.Blocks))
hash, err = r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), data, gasLimit)
log.Info("commitBatch tx resent with manual gas estimation ", "id", id, "index", batch.Index, "gasLimit", gasLimit, "hash", hash.String(), "err", err)
}
}
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
log.Error("Failed to send commitBatch tx to layer1 ", "id", id, "index", batch.Index, "err", err)
}
return
}
log.Info("commitBatch in layer1", "id", id, "index", batch.Index, "hash", hash)
log.Info("commitBatch in layer1", "batch_id", id, "index", batch.Index, "hash", hash)
// record and sync with db, @todo handle db error
err = r.db.UpdateCommitTxHashAndRollupStatus(r.ctx, id, hash.String(), orm.RollupCommitting)
if err != nil {
log.Error("UpdateCommitTxHashAndRollupStatus failed", "id", id, "index", batch.Index, "err", err)
}
r.processingCommitment[id] = id
r.processingCommitment.Store(txID, id)
}
// ProcessCommittedBatches submits proofs to the layer 1 rollup contract
func (r *Layer2Relayer) ProcessCommittedBatches() {
func (r *Layer2Relayer) ProcessCommittedBatches(wg *sync.WaitGroup) {
defer wg.Done()
// set skipped batches in a single db operation
if count, err := r.db.UpdateSkippedBatches(); err != nil {
log.Error("UpdateSkippedBatches failed", "err", err)
// continue anyway
} else if count > 0 {
log.Info("Skipping batches", "count", count)
}
// batches are sorted by batch index in increasing order
batches, err := r.db.GetCommittedBatches()
batches, err := r.db.GetCommittedBatches(1)
if err != nil {
log.Error("Failed to fetch committed L2 batches", "err", err)
return
@@ -288,6 +346,8 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
return
case orm.ProvingTaskFailed, orm.ProvingTaskSkipped:
// note: this is covered by UpdateSkippedBatches, but we keep it for the sake of completeness
if err = r.db.UpdateRollupStatus(r.ctx, id, orm.RollupFinalizationSkipped); err != nil {
log.Warn("UpdateRollupStatus failed", "id", id, "err", err)
}
@@ -332,7 +392,9 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
return
}
txHash, err := r.rollupSender.SendTransaction(id, &r.cfg.RollupContractAddress, big.NewInt(0), data)
txID := id + "-finalize"
// add suffix `-finalize` to avoid ID collisions with the commit tx in unit tests
txHash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), data, 0)
hash := &txHash
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
@@ -340,15 +402,15 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
}
return
}
log.Info("finalizeBatchWithProof in layer1", "id", id, "hash", hash)
log.Info("finalizeBatchWithProof in layer1", "batch_id", id, "hash", hash)
// record and sync with db, @todo handle db error
err = r.db.UpdateFinalizeTxHashAndRollupStatus(r.ctx, id, hash.String(), orm.RollupFinalizing)
if err != nil {
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "id", id, "err", err)
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_id", id, "err", err)
}
success = true
r.processingFinalization[id] = id
r.processingFinalization.Store(txID, id)
default:
log.Error("encounter unreachable case in ProcessCommittedBatches",
@@ -359,6 +421,8 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
// Start the relayer process
func (r *Layer2Relayer) Start() {
log.Info("Starting l2/relayer")
go func() {
// triggered by timer
ticker := time.NewTicker(time.Second)
@@ -367,9 +431,12 @@ func (r *Layer2Relayer) Start() {
for {
select {
case <-ticker.C:
r.ProcessSavedEvents()
r.ProcessPendingBatches()
r.ProcessCommittedBatches()
var wg = sync.WaitGroup{}
wg.Add(3)
go r.ProcessSavedEvents(&wg)
go r.ProcessPendingBatches(&wg)
go r.ProcessCommittedBatches(&wg)
wg.Wait()
case confirmation := <-r.messageCh:
r.handleConfirmation(confirmation)
case confirmation := <-r.rollupCh:
@@ -394,36 +461,45 @@ func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
transactionType := "Unknown"
// check whether it is message relay transaction
if msgHash, ok := r.processingMessage[confirmation.ID]; ok {
if msgHash, ok := r.processingMessage.Load(confirmation.ID); ok {
transactionType = "MessageRelay"
// @todo handle db error
err := r.db.UpdateLayer2StatusAndLayer1Hash(r.ctx, msgHash, orm.MsgConfirmed, confirmation.TxHash.String())
err := r.db.UpdateLayer2StatusAndLayer1Hash(r.ctx, msgHash.(string), orm.MsgConfirmed, confirmation.TxHash.String())
if err != nil {
log.Warn("UpdateLayer2StatusAndLayer1Hash failed", "msgHash", msgHash, "err", err)
log.Warn("UpdateLayer2StatusAndLayer1Hash failed", "msgHash", msgHash.(string), "err", err)
}
delete(r.processingMessage, confirmation.ID)
r.processingMessage.Delete(confirmation.ID)
}
// check whether it is block commitment transaction
if batch_id, ok := r.processingCommitment[confirmation.ID]; ok {
if batchID, ok := r.processingCommitment.Load(confirmation.ID); ok {
transactionType = "BatchCommitment"
// @todo handle db error
err := r.db.UpdateCommitTxHashAndRollupStatus(r.ctx, batch_id, confirmation.TxHash.String(), orm.RollupCommitted)
err := r.db.UpdateCommitTxHashAndRollupStatus(r.ctx, batchID.(string), confirmation.TxHash.String(), orm.RollupCommitted)
if err != nil {
log.Warn("UpdateCommitTxHashAndRollupStatus failed", "batch_id", batch_id, "err", err)
log.Warn("UpdateCommitTxHashAndRollupStatus failed", "batch_id", batchID.(string), "err", err)
}
delete(r.processingCommitment, confirmation.ID)
r.processingCommitment.Delete(confirmation.ID)
}
// check whether it is proof finalization transaction
if batch_id, ok := r.processingFinalization[confirmation.ID]; ok {
if batchID, ok := r.processingFinalization.Load(confirmation.ID); ok {
transactionType = "ProofFinalization"
// @todo handle db error
err := r.db.UpdateFinalizeTxHashAndRollupStatus(r.ctx, batch_id, confirmation.TxHash.String(), orm.RollupFinalized)
err := r.db.UpdateFinalizeTxHashAndRollupStatus(r.ctx, batchID.(string), confirmation.TxHash.String(), orm.RollupFinalized)
if err != nil {
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_id", batch_id, "err", err)
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_id", batchID.(string), "err", err)
}
delete(r.processingFinalization, confirmation.ID)
r.processingFinalization.Delete(confirmation.ID)
}
log.Info("transaction confirmed in layer1", "type", transactionType, "confirmation", confirmation)
}
func estimateCommitBatchGas(callDataLength int, numBlocks int) uint64 {
gasLimit := uint64(0)
gasLimit += 16 * uint64(callDataLength) // calldata cost
gasLimit += 4*2100 + 3*22100 // fixed cost per batch
gasLimit += 4 * 22100 * uint64(numBlocks) // cost per block in batch
gasLimit = gasLimit * 12 / 10 // apply a 20% safety multiplier
return gasLimit
}
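As a rough numeric check of estimateCommitBatchGas (the inputs are invented for illustration): a commitBatch call with 100,000 bytes of calldata and 5 blocks gets 16·100,000 + (4·2,100 + 3·22,100) + 4·22,100·5 = 1,600,000 + 74,700 + 442,000 = 2,116,700 gas, and the 12/10 multiplier then raises the manual limit to 2,540,040.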

View File

@@ -1,18 +1,17 @@
package l2_test
package l2
import (
"context"
"encoding/json"
"math/big"
"os"
"sync"
"testing"
"time"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert"
"scroll-tech/bridge/l2"
"scroll-tech/database"
"scroll-tech/database/migrate"
"scroll-tech/database/orm"
@@ -42,7 +41,7 @@ func testCreateNewRelayer(t *testing.T) {
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
relayer, err := l2.NewLayer2Relayer(context.Background(), l2Cli, int64(cfg.L2Config.Confirmations), db, cfg.L2Config.RelayerConfig)
relayer, err := NewLayer2Relayer(context.Background(), db, cfg.L2Config.RelayerConfig)
assert.NoError(t, err)
defer relayer.Stop()
@@ -57,7 +56,7 @@ func testL2RelayerProcessSaveEvents(t *testing.T) {
defer db.Close()
l2Cfg := cfg.L2Config
relayer, err := l2.NewLayer2Relayer(context.Background(), l2Cli, int64(l2Cfg.Confirmations), db, l2Cfg.RelayerConfig)
relayer, err := NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer relayer.Stop()
@@ -76,7 +75,7 @@ func testL2RelayerProcessSaveEvents(t *testing.T) {
},
},
}
err = db.InsertBlockTraces(context.Background(), traces)
err = db.InsertBlockTraces(traces)
assert.NoError(t, err)
dbTx, err := db.Beginx()
@@ -96,7 +95,10 @@ func testL2RelayerProcessSaveEvents(t *testing.T) {
err = db.UpdateRollupStatus(context.Background(), batchID, orm.RollupFinalized)
assert.NoError(t, err)
relayer.ProcessSavedEvents()
var wg = sync.WaitGroup{}
wg.Add(1)
relayer.ProcessSavedEvents(&wg)
wg.Wait()
msg, err := db.GetL2MessageByNonce(templateL2Message[0].Nonce)
assert.NoError(t, err)
@@ -111,7 +113,7 @@ func testL2RelayerProcessPendingBatches(t *testing.T) {
defer db.Close()
l2Cfg := cfg.L2Config
relayer, err := l2.NewLayer2Relayer(context.Background(), l2Cli, int64(l2Cfg.Confirmations), db, l2Cfg.RelayerConfig)
relayer, err := NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer relayer.Stop()
@@ -132,7 +134,7 @@ func testL2RelayerProcessPendingBatches(t *testing.T) {
assert.NoError(t, err)
traces = append(traces, blockTrace)
err = db.InsertBlockTraces(context.Background(), traces)
err = db.InsertBlockTraces(traces)
assert.NoError(t, err)
dbTx, err := db.Beginx()
@@ -152,7 +154,10 @@ func testL2RelayerProcessPendingBatches(t *testing.T) {
// err = db.UpdateRollupStatus(context.Background(), batchID, orm.RollupPending)
// assert.NoError(t, err)
relayer.ProcessPendingBatches()
var wg = sync.WaitGroup{}
wg.Add(1)
relayer.ProcessPendingBatches(&wg)
wg.Wait()
// Check if Rollup Result is changed successfully
status, err := db.GetRollupStatus(batchID)
@@ -168,7 +173,7 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
defer db.Close()
l2Cfg := cfg.L2Config
relayer, err := l2.NewLayer2Relayer(context.Background(), l2Cli, int64(l2Cfg.Confirmations), db, l2Cfg.RelayerConfig)
relayer, err := NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer relayer.Stop()
@@ -189,9 +194,80 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
err = db.UpdateProvingStatus(batchID, orm.ProvingTaskVerified)
assert.NoError(t, err)
relayer.ProcessCommittedBatches()
var wg = sync.WaitGroup{}
wg.Add(1)
relayer.ProcessCommittedBatches(&wg)
wg.Wait()
status, err := db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupFinalizing, status)
}
func testL2RelayerSkipBatches(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer relayer.Stop()
createBatch := func(rollupStatus orm.RollupStatus, provingStatus orm.ProvingStatus) string {
dbTx, err := db.Beginx()
assert.NoError(t, err)
batchID, err := db.NewBatchInDBTx(dbTx, &orm.BlockInfo{}, &orm.BlockInfo{}, "0", 1, 194676) // startBlock & endBlock & parentHash & totalTxNum & totalL2Gas don't really matter here
assert.NoError(t, err)
err = dbTx.Commit()
assert.NoError(t, err)
err = db.UpdateRollupStatus(context.Background(), batchID, rollupStatus)
assert.NoError(t, err)
tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
err = db.UpdateProofByID(context.Background(), batchID, tProof, tInstanceCommitments, 100)
assert.NoError(t, err)
err = db.UpdateProvingStatus(batchID, provingStatus)
assert.NoError(t, err)
return batchID
}
skipped := []string{
createBatch(orm.RollupCommitted, orm.ProvingTaskSkipped),
createBatch(orm.RollupCommitted, orm.ProvingTaskFailed),
}
notSkipped := []string{
createBatch(orm.RollupPending, orm.ProvingTaskSkipped),
createBatch(orm.RollupCommitting, orm.ProvingTaskSkipped),
createBatch(orm.RollupFinalizing, orm.ProvingTaskSkipped),
createBatch(orm.RollupFinalized, orm.ProvingTaskSkipped),
createBatch(orm.RollupPending, orm.ProvingTaskFailed),
createBatch(orm.RollupCommitting, orm.ProvingTaskFailed),
createBatch(orm.RollupFinalizing, orm.ProvingTaskFailed),
createBatch(orm.RollupFinalized, orm.ProvingTaskFailed),
createBatch(orm.RollupCommitted, orm.ProvingTaskVerified),
}
var wg = sync.WaitGroup{}
wg.Add(1)
relayer.ProcessCommittedBatches(&wg)
wg.Wait()
for _, id := range skipped {
status, err := db.GetRollupStatus(id)
assert.NoError(t, err)
assert.Equal(t, orm.RollupFinalizationSkipped, status)
}
for _, id := range notSkipped {
status, err := db.GetRollupStatus(id)
assert.NoError(t, err)
assert.NotEqual(t, orm.RollupFinalizationSkipped, status)
}
}

View File

@@ -7,7 +7,7 @@ import (
"reflect"
"time"
"github.com/scroll-tech/go-ethereum"
geth "github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
@@ -76,70 +76,91 @@ func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmat
// Start the Listening process
func (w *WatcherClient) Start() {
log.Info("Starting l2/watcher")
go func() {
if reflect.ValueOf(w.orm).IsNil() {
panic("must run L2 watcher with DB")
}
lastFetchedBlock, err := w.orm.GetBlockTracesLatestHeight()
if err != nil {
panic(fmt.Sprintf("failed to GetBlockTracesLatestHeight in DB: %v", err))
}
ctx, cancel := context.WithCancel(w.ctx)
if lastFetchedBlock < 0 {
lastFetchedBlock = 0
}
lastBlockHeightChangeTime := time.Now()
// trace fetcher loop
go func(ctx context.Context) {
ticker := time.NewTicker(3 * time.Second)
defer ticker.Stop()
// triggered by timer
// TODO: make it configurable
ticker := time.NewTicker(3 * time.Second)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return
for {
select {
case <-ticker.C:
// get current height
number, err := w.BlockNumber(w.ctx)
if err != nil {
log.Error("failed to get_BlockNumber", "err", err)
continue
}
duration := time.Since(lastBlockHeightChangeTime)
var blockToFetch uint64
if number > uint64(lastFetchedBlock)+w.confirmations {
// latest block height changed
blockToFetch = number - w.confirmations
} else if duration.Seconds() > 60 {
// l2geth hasn't produced any blocks for more than 1 minute.
blockToFetch = number
}
// fetch at most `blockTracesFetchLimit=10` missing blocks
if blockToFetch > uint64(lastFetchedBlock)+blockTracesFetchLimit {
blockToFetch = uint64(lastFetchedBlock) + blockTracesFetchLimit
}
if lastFetchedBlock != int64(blockToFetch) {
lastFetchedBlock = int64(blockToFetch)
lastBlockHeightChangeTime = time.Now()
}
case <-ticker.C:
// get current height
number, err := w.BlockNumber(ctx)
if err != nil {
log.Error("failed to get_BlockNumber", "err", err)
continue
}
if err := w.tryFetchRunningMissingBlocks(w.ctx, blockToFetch); err != nil {
log.Error("failed to fetchRunningMissingBlocks", "err", err)
}
if number >= w.confirmations {
number = number - w.confirmations
} else {
number = 0
}
// @todo handle error
if err := w.fetchContractEvent(number); err != nil {
log.Error("failed to fetchContractEvent", "err", err)
w.tryFetchRunningMissingBlocks(ctx, number)
}
if err := w.batchProposer.tryProposeBatch(); err != nil {
log.Error("failed to tryProposeBatch", "err", err)
}
case <-w.stopCh:
return
}
}
}(ctx)
// event fetcher loop
go func(ctx context.Context) {
ticker := time.NewTicker(3 * time.Second)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
// get current height
number, err := w.BlockNumber(ctx)
if err != nil {
log.Error("failed to get_BlockNumber", "err", err)
continue
}
if number >= w.confirmations {
number = number - w.confirmations
} else {
number = 0
}
w.FetchContractEvent(number)
}
}
}(ctx)
// batch proposer loop
go func(ctx context.Context) {
ticker := time.NewTicker(3 * time.Second)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
w.batchProposer.tryProposeBatch()
}
}
}(ctx)
<-w.stopCh
cancel()
}()
}
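The watcher now runs its trace fetcher, event fetcher and batch proposer as independent ticker loops that all stop when the shared context is cancelled. A minimal sketch of that pattern (loop names, interval and bodies are placeholders, not the watcher's code):

package main

import (
	"context"
	"fmt"
	"time"
)

func runLoop(ctx context.Context, name string, work func()) {
	ticker := time.NewTicker(time.Second)
	defer ticker.Stop()
	for {
		select {
		case <-ctx.Done():
			fmt.Println(name, "stopped")
			return
		case <-ticker.C:
			work()
		}
	}
}

func main() {
	ctx, cancel := context.WithCancel(context.Background())
	stopCh := make(chan struct{})

	// Each loop ticks on its own; a slow iteration in one loop
	// no longer delays the other two.
	go runLoop(ctx, "trace-fetcher", func() { fmt.Println("fetch traces") })
	go runLoop(ctx, "event-fetcher", func() { fmt.Println("fetch events") })
	go runLoop(ctx, "batch-proposer", func() { fmt.Println("propose batch") })

	go func() {
		time.Sleep(3 * time.Second)
		close(stopCh)
	}()

	<-stopCh
	cancel()
	time.Sleep(100 * time.Millisecond) // give the loops a moment to print their stop messages
}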
@@ -151,109 +172,132 @@ func (w *WatcherClient) Stop() {
const blockTracesFetchLimit = uint64(10)
// try to fetch missing blocks if inconsistent
func (w *WatcherClient) tryFetchRunningMissingBlocks(ctx context.Context, backTrackFrom uint64) error {
func (w *WatcherClient) tryFetchRunningMissingBlocks(ctx context.Context, blockHeight uint64) {
// Get the newest block in the DB. There must already be blocks in the DB at this point.
// Don't use "block_trace" table "trace" column's BlockTrace.Number,
// because it might be empty if the corresponding rollup_result is finalized/finalization_skipped
heightInDB, err := w.orm.GetBlockTracesLatestHeight()
if err != nil {
return fmt.Errorf("failed to GetBlockTracesLatestHeight in DB: %v", err)
log.Error("failed to GetBlockTracesLatestHeight", "err", err)
return
}
backTrackTo := uint64(0)
// Can't get trace from genesis block, so the default start number is 1.
var from = uint64(1)
if heightInDB > 0 {
backTrackTo = uint64(heightInDB)
from = uint64(heightInDB) + 1
}
// fetch forward from the first missing block, blockTracesFetchLimit blocks at a time
for ; from <= blockHeight; from += blockTracesFetchLimit {
to := from + blockTracesFetchLimit - 1
if to > blockHeight {
to = blockHeight
}
// Get block traces and insert into db.
if err = w.getAndStoreBlockTraces(ctx, from, to); err != nil {
log.Error("fail to getAndStoreBlockTraces", "from", from, "to", to, "err", err)
return
}
}
}
func (w *WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to uint64) error {
var traces []*types.BlockTrace
for number := backTrackFrom; number > backTrackTo; number-- {
for number := from; number <= to; number++ {
log.Debug("retrieving block trace", "height", number)
trace, err2 := w.GetBlockTraceByNumber(ctx, big.NewInt(int64(number)))
if err2 != nil {
return fmt.Errorf("failed to GetBlockResultByHash: %v. number: %v", err2, number)
}
log.Info("retrieved block trace", "height", trace.Header.Number, "hash", trace.Header.Hash)
log.Info("retrieved block trace", "height", trace.Header.Number, "hash", trace.Header.Hash().String())
traces = append(traces, trace)
}
if len(traces) > 0 {
if err = w.orm.InsertBlockTraces(ctx, traces); err != nil {
if err := w.orm.InsertBlockTraces(traces); err != nil {
return fmt.Errorf("failed to batch insert BlockTraces: %v", err)
}
}
return nil
}
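tryFetchRunningMissingBlocks walks forward from the block after the latest one stored in the DB, in windows of blockTracesFetchLimit. A standalone sketch of that windowing loop (heights and the fetch function are stand-ins):

package main

import "fmt"

const fetchLimit = uint64(10)

// fetchRange stands in for getAndStoreBlockTraces.
func fetchRange(from, to uint64) error {
	fmt.Printf("fetching blocks %d..%d\n", from, to)
	return nil
}

func main() {
	var lastStored uint64 = 17 // latest height already in the DB
	var head uint64 = 42       // confirmed chain height to catch up to

	for from := lastStored + 1; from <= head; from += fetchLimit {
		to := from + fetchLimit - 1
		if to > head {
			to = head
		}
		if err := fetchRange(from, to); err != nil {
			// stop here and retry on the next tick, as the watcher does
			return
		}
	}
}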
const contractEventsBlocksFetchLimit = int64(10)
// FetchContractEvent pulls the latest event logs from the given contract address and saves them in the DB
func (w *WatcherClient) fetchContractEvent(blockHeight uint64) error {
func (w *WatcherClient) FetchContractEvent(blockHeight uint64) {
defer func() {
log.Info("l2 watcher fetchContractEvent", "w.processedMsgHeight", w.processedMsgHeight)
}()
fromBlock := int64(w.processedMsgHeight) + 1
toBlock := int64(blockHeight) - int64(w.confirmations)
toBlock := int64(blockHeight)
if toBlock < fromBlock {
return nil
}
for from := fromBlock; from <= toBlock; from += contractEventsBlocksFetchLimit {
to := from + contractEventsBlocksFetchLimit - 1
if toBlock > fromBlock+contractEventsBlocksFetchLimit {
toBlock = fromBlock + contractEventsBlocksFetchLimit - 1
}
// warning: uint int conversion...
query := ethereum.FilterQuery{
FromBlock: big.NewInt(fromBlock), // inclusive
ToBlock: big.NewInt(toBlock), // inclusive
Addresses: []common.Address{
w.messengerAddress,
},
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 3)
query.Topics[0][0] = common.HexToHash(bridge_abi.SENT_MESSAGE_EVENT_SIGNATURE)
query.Topics[0][1] = common.HexToHash(bridge_abi.RELAYED_MESSAGE_EVENT_SIGNATURE)
query.Topics[0][2] = common.HexToHash(bridge_abi.FAILED_RELAYED_MESSAGE_EVENT_SIGNATURE)
logs, err := w.FilterLogs(w.ctx, query)
if err != nil {
log.Error("failed to get event logs", "err", err)
return err
}
if len(logs) == 0 {
w.processedMsgHeight = uint64(toBlock)
return nil
}
log.Info("received new L2 messages", "fromBlock", fromBlock, "toBlock", toBlock,
"cnt", len(logs))
sentMessageEvents, relayedMessageEvents, err := w.parseBridgeEventLogs(logs)
if err != nil {
log.Error("failed to parse emitted event log", "err", err)
return err
}
// Update relayed messages first to make sure we don't forget to update submitted messages,
// since we always start syncing from the latest unprocessed message.
for _, msg := range relayedMessageEvents {
if msg.isSuccessful {
// succeed
err = w.orm.UpdateLayer1StatusAndLayer2Hash(w.ctx, msg.msgHash.String(), orm.MsgConfirmed, msg.txHash.String())
} else {
// failed
err = w.orm.UpdateLayer1StatusAndLayer2Hash(w.ctx, msg.msgHash.String(), orm.MsgFailed, msg.txHash.String())
if to > toBlock {
to = toBlock
}
// warning: uint int conversion...
query := geth.FilterQuery{
FromBlock: big.NewInt(from), // inclusive
ToBlock: big.NewInt(to), // inclusive
Addresses: []common.Address{
w.messengerAddress,
},
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 3)
query.Topics[0][0] = common.HexToHash(bridge_abi.SentMessageEventSignature)
query.Topics[0][1] = common.HexToHash(bridge_abi.RelayedMessageEventSignature)
query.Topics[0][2] = common.HexToHash(bridge_abi.FailedRelayedMessageEventSignature)
logs, err := w.FilterLogs(w.ctx, query)
if err != nil {
log.Error("Failed to update layer1 status and layer2 hash", "err", err)
return err
log.Error("failed to get event logs", "err", err)
return
}
}
if len(logs) == 0 {
w.processedMsgHeight = uint64(to)
continue
}
log.Info("received new L2 messages", "fromBlock", from, "toBlock", to, "cnt", len(logs))
err = w.orm.SaveL2Messages(w.ctx, sentMessageEvents)
if err == nil {
w.processedMsgHeight = uint64(toBlock)
sentMessageEvents, relayedMessageEvents, err := w.parseBridgeEventLogs(logs)
if err != nil {
log.Error("failed to parse emitted event log", "err", err)
return
}
// Update relayed messages first to make sure we don't forget to update submitted messages,
// since we always start syncing from the latest unprocessed message.
for _, msg := range relayedMessageEvents {
if msg.isSuccessful {
// succeed
err = w.orm.UpdateLayer1StatusAndLayer2Hash(w.ctx, msg.msgHash.String(), orm.MsgConfirmed, msg.txHash.String())
} else {
// failed
err = w.orm.UpdateLayer1StatusAndLayer2Hash(w.ctx, msg.msgHash.String(), orm.MsgFailed, msg.txHash.String())
}
if err != nil {
log.Error("Failed to update layer1 status and layer2 hash", "err", err)
return
}
}
if err = w.orm.SaveL2Messages(w.ctx, sentMessageEvents); err != nil {
log.Error("failed to save l2 messages", "err", err)
return
}
w.processedMsgHeight = uint64(to)
}
return err
}
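The query above lists all three event signatures in the first topic slot, which FilterLogs treats as an OR within that position: a log matches if its first topic equals any of them. A compact sketch of the same query shape against the go-ethereum client API used here (package, function name and parameters are illustrative):

package example

import (
	"context"
	"math/big"

	geth "github.com/scroll-tech/go-ethereum"
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/core/types"
	"github.com/scroll-tech/go-ethereum/ethclient"
)

// fetchMessengerLogs returns logs emitted in [from, to] (both inclusive) by the
// messenger contract whose first topic matches any of the given signatures.
func fetchMessengerLogs(ctx context.Context, client *ethclient.Client, messenger common.Address,
	from, to int64, signatures []common.Hash) ([]types.Log, error) {
	query := geth.FilterQuery{
		FromBlock: big.NewInt(from), // inclusive
		ToBlock:   big.NewInt(to),   // inclusive
		Addresses: []common.Address{messenger},
		// several hashes in the same topic position = OR semantics
		Topics: [][]common.Hash{signatures},
	}
	return client.FilterLogs(ctx, query)
}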
func (w *WatcherClient) parseBridgeEventLogs(logs []types.Log) ([]*orm.L2Message, []relayedMessage, error) {
@@ -264,7 +308,7 @@ func (w *WatcherClient) parseBridgeEventLogs(logs []types.Log) ([]*orm.L2Message
var relayedMessages []relayedMessage
for _, vLog := range logs {
switch vLog.Topics[0] {
case common.HexToHash(bridge_abi.SENT_MESSAGE_EVENT_SIGNATURE):
case common.HexToHash(bridge_abi.SentMessageEventSignature):
event := struct {
Target common.Address
Sender common.Address
@@ -285,7 +329,7 @@ func (w *WatcherClient) parseBridgeEventLogs(logs []types.Log) ([]*orm.L2Message
event.Target = common.HexToAddress(vLog.Topics[1].String())
l2Messages = append(l2Messages, &orm.L2Message{
Nonce: event.MessageNonce.Uint64(),
MsgHash: utils.ComputeMessageHash(event.Target, event.Sender, event.Value, event.Fee, event.Deadline, event.Message, event.MessageNonce).String(),
MsgHash: utils.ComputeMessageHash(event.Sender, event.Target, event.Value, event.Fee, event.Deadline, event.Message, event.MessageNonce).String(),
Height: vLog.BlockNumber,
Sender: event.Sender.String(),
Value: event.Value.String(),
@@ -296,7 +340,7 @@ func (w *WatcherClient) parseBridgeEventLogs(logs []types.Log) ([]*orm.L2Message
Calldata: common.Bytes2Hex(event.Message),
Layer2Hash: vLog.TxHash.Hex(),
})
case common.HexToHash(bridge_abi.RELAYED_MESSAGE_EVENT_SIGNATURE):
case common.HexToHash(bridge_abi.RelayedMessageEventSignature):
event := struct {
MsgHash common.Hash
}{}
@@ -307,7 +351,7 @@ func (w *WatcherClient) parseBridgeEventLogs(logs []types.Log) ([]*orm.L2Message
txHash: vLog.TxHash,
isSuccessful: true,
})
case common.HexToHash(bridge_abi.FAILED_RELAYED_MESSAGE_EVENT_SIGNATURE):
case common.HexToHash(bridge_abi.FailedRelayedMessageEventSignature):
event := struct {
MsgHash common.Hash
}{}

View File

@@ -1,33 +1,5 @@
package l2
import (
"errors"
"github.com/scroll-tech/go-ethereum/rpc"
)
// WatcherAPI watcher api service
type WatcherAPI interface {
ReplayBlockResultByHash(blockNrOrHash rpc.BlockNumberOrHash) (bool, error)
}
// ReplayBlockResultByHash temporary interface for easy testing.
func (r *WatcherClient) ReplayBlockResultByHash(blockNrOrHash rpc.BlockNumberOrHash) (bool, error) {
orm := r.orm
params := make(map[string]interface{})
if number, ok := blockNrOrHash.Number(); ok {
params["number"] = int64(number)
}
if hash, ok := blockNrOrHash.Hash(); ok {
params["hash"] = hash.String()
}
if len(params) == 0 {
return false, errors.New("empty params")
}
trace, err := orm.GetBlockTraces(params)
if err != nil {
return false, err
}
r.Send(&trace[0])
return true, nil
}

View File

@@ -1,4 +1,4 @@
package l2_test
package l2
import (
"context"
@@ -15,7 +15,6 @@ import (
"github.com/stretchr/testify/assert"
"scroll-tech/bridge/config"
"scroll-tech/bridge/l2"
"scroll-tech/bridge/mock_bridge"
"scroll-tech/bridge/sender"
@@ -32,7 +31,7 @@ func testCreateNewWatcherAndStop(t *testing.T) {
defer l2db.Close()
l2cfg := cfg.L2Config
rc := l2.NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.BatchProposerConfig, l2cfg.L2MessengerAddress, l2db)
rc := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.BatchProposerConfig, l2cfg.L2MessengerAddress, l2db)
rc.Start()
defer rc.Stop()
@@ -45,7 +44,7 @@ func testCreateNewWatcherAndStop(t *testing.T) {
numTransactions := 3
toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
for i := 0; i < numTransactions; i++ {
_, err = newSender.SendTransaction(strconv.Itoa(1000+i), &toAddress, big.NewInt(1000000000), nil)
_, err = newSender.SendTransaction(strconv.Itoa(1000+i), &toAddress, big.NewInt(1000000000), nil, 0)
assert.NoError(t, err)
<-newSender.ConfirmChan()
}
@@ -68,7 +67,7 @@ func testMonitorBridgeContract(t *testing.T) {
auth := prepareAuth(t, l2Cli, cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys[0])
// deploy mock bridge
_, tx, instance, err := mock_bridge.DeployMockBridge(auth, l2Cli)
_, tx, instance, err := mock_bridge.DeployMockBridgeL2(auth, l2Cli)
assert.NoError(t, err)
address, err := bind.WaitDeployed(context.Background(), l2Cli, tx)
assert.NoError(t, err)
@@ -80,7 +79,10 @@ func testMonitorBridgeContract(t *testing.T) {
// Call mock_bridge instance sendMessage to trigger emit events
toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
message := []byte("testbridgecontract")
tx, err = instance.SendMessage(auth, toAddress, message, auth.GasPrice)
fee := big.NewInt(0)
gasLimit := big.NewInt(1)
tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
assert.NoError(t, err)
receipt, err := bind.WaitMined(context.Background(), l2Cli, tx)
if receipt.Status != types.ReceiptStatusSuccessful || err != nil {
@@ -90,7 +92,7 @@ func testMonitorBridgeContract(t *testing.T) {
// extra block mined
toAddress = common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
message = []byte("testbridgecontract")
tx, err = instance.SendMessage(auth, toAddress, message, auth.GasPrice)
tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
assert.NoError(t, err)
receipt, err = bind.WaitMined(context.Background(), l2Cli, tx)
if receipt.Status != types.ReceiptStatusSuccessful || err != nil {
@@ -110,7 +112,7 @@ func testMonitorBridgeContract(t *testing.T) {
assert.NoError(t, err)
t.Log("Height in DB is", height)
assert.Greater(t, height, int64(previousHeight))
msgs, err := db.GetL2MessagesByStatus(orm.MsgPending)
msgs, err := db.GetL2Messages(map[string]interface{}{"status": orm.MsgPending})
assert.NoError(t, err)
assert.Equal(t, 2, len(msgs))
}
@@ -127,7 +129,7 @@ func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
auth := prepareAuth(t, l2Cli, cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys[0])
_, trx, instance, err := mock_bridge.DeployMockBridge(auth, l2Cli)
_, trx, instance, err := mock_bridge.DeployMockBridgeL2(auth, l2Cli)
assert.NoError(t, err)
address, err := bind.WaitDeployed(context.Background(), l2Cli, trx)
assert.NoError(t, err)
@@ -147,7 +149,9 @@ func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
auth.Nonce = big.NewInt(int64(nonce))
toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
message := []byte("testbridgecontract")
tx, err = instance.SendMessage(auth, toAddress, message, auth.GasPrice)
fee := big.NewInt(0)
gasLimit := big.NewInt(1)
tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
assert.NoError(t, err)
}
@@ -163,7 +167,9 @@ func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
auth.Nonce = big.NewInt(int64(nonce))
toAddress := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
message := []byte("testbridgecontract")
tx, err = instance.SendMessage(auth, toAddress, message, auth.GasPrice)
fee := big.NewInt(0)
gasLimit := big.NewInt(1)
tx, err = instance.SendMessage(auth, toAddress, fee, message, gasLimit)
assert.NoError(t, err)
receipt, err = bind.WaitMined(context.Background(), l2Cli, tx)
if receipt.Status != types.ReceiptStatusSuccessful || err != nil {
@@ -178,13 +184,13 @@ func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
assert.NoError(t, err)
t.Log("LatestHeight is", height)
assert.Greater(t, height, int64(previousHeight)) // height must be greater than previousHeight because confirmations is 0
msgs, err := db.GetL2MessagesByStatus(orm.MsgPending)
msgs, err := db.GetL2Messages(map[string]interface{}{"status": orm.MsgPending})
assert.NoError(t, err)
assert.Equal(t, 5, len(msgs))
}
func prepareRelayerClient(l2Cli *ethclient.Client, bpCfg *config.BatchProposerConfig, db database.OrmFactory, contractAddr common.Address) *l2.WatcherClient {
return l2.NewL2WatcherClient(context.Background(), l2Cli, 0, bpCfg, contractAddr, db)
func prepareRelayerClient(l2Cli *ethclient.Client, bpCfg *config.BatchProposerConfig, db database.OrmFactory, contractAddr common.Address) *WatcherClient {
return NewL2WatcherClient(context.Background(), l2Cli, 0, bpCfg, contractAddr, db)
}
func prepareAuth(t *testing.T, l2Cli *ethclient.Client, privateKey *ecdsa.PrivateKey) *bind.TransactOpts {

View File

@@ -0,0 +1,187 @@
// SPDX-License-Identifier: UNLICENSED
pragma solidity ^0.8.0;
contract MockBridgeL1 {
/*********************************
* Events from L1ScrollMessenger *
*********************************/
event SentMessage(
address indexed target,
address sender,
uint256 value,
uint256 fee,
uint256 deadline,
bytes message,
uint256 messageNonce,
uint256 gasLimit
);
event MessageDropped(bytes32 indexed msgHash);
event RelayedMessage(bytes32 indexed msgHash);
event FailedRelayedMessage(bytes32 indexed msgHash);
/************************
* Events from ZKRollup *
************************/
/// @notice Emitted when a new batch is committed.
/// @param _batchHash The hash of the batch
/// @param _batchIndex The index of the batch
/// @param _parentHash The hash of parent batch
event CommitBatch(bytes32 indexed _batchId, bytes32 _batchHash, uint256 _batchIndex, bytes32 _parentHash);
/// @notice Emitted when a batch is reverted.
/// @param _batchId The identification of the batch.
event RevertBatch(bytes32 indexed _batchId);
/// @notice Emitted when a batch is finalized.
/// @param _batchHash The hash of the batch
/// @param _batchIndex The index of the batch
/// @param _parentHash The hash of parent batch
event FinalizeBatch(bytes32 indexed _batchId, bytes32 _batchHash, uint256 _batchIndex, bytes32 _parentHash);
/***********
* Structs *
***********/
struct L2MessageProof {
uint256 batchIndex;
uint256 blockHeight;
bytes merkleProof;
}
/// @dev The transaction struct
struct Layer2Transaction {
address caller;
uint64 nonce;
address target;
uint64 gas;
uint256 gasPrice;
uint256 value;
bytes data;
// signature
uint256 r;
uint256 s;
uint64 v;
}
/// @dev The block header struct
struct Layer2BlockHeader {
bytes32 blockHash;
bytes32 parentHash;
uint256 baseFee;
bytes32 stateRoot;
uint64 blockHeight;
uint64 gasUsed;
uint64 timestamp;
bytes extraData;
Layer2Transaction[] txs;
}
/// @dev The batch struct, the batch hash is always the last block hash of `blocks`.
struct Layer2Batch {
uint64 batchIndex;
// The hash of the last block in the parent batch
bytes32 parentHash;
Layer2BlockHeader[] blocks;
}
struct Layer2BatchStored {
bytes32 batchHash;
bytes32 parentHash;
uint64 batchIndex;
bool verified;
}
/*************
* Variables *
*************/
/// @notice Message nonce, used to avoid relay attack.
uint256 public messageNonce;
/// @notice Mapping from batch id to batch struct.
mapping(bytes32 => Layer2BatchStored) public batches;
/************************************
* Functions from L1ScrollMessenger *
************************************/
function sendMessage(
address _to,
uint256 _fee,
bytes memory _message,
uint256 _gasLimit
) external payable {
// solhint-disable-next-line not-rely-on-time
uint256 _deadline = block.timestamp + 1 days;
uint256 _value;
unchecked {
_value = msg.value - _fee;
}
uint256 _nonce = messageNonce;
emit SentMessage(_to, msg.sender, _value, _fee, _deadline, _message, _nonce, _gasLimit);
messageNonce += 1;
}
function relayMessageWithProof(
address _from,
address _to,
uint256 _value,
uint256 _fee,
uint256 _deadline,
uint256 _nonce,
bytes memory _message,
L2MessageProof memory
) external {
bytes32 _msghash = keccak256(abi.encodePacked(_from, _to, _value, _fee, _deadline, _nonce, _message));
emit RelayedMessage(_msghash);
}
/***************************
* Functions from ZKRollup *
***************************/
function commitBatch(Layer2Batch memory _batch) external {
bytes32 _batchHash = _batch.blocks[_batch.blocks.length - 1].blockHash;
bytes32 _batchId = _computeBatchId(_batchHash, _batch.parentHash, _batch.batchIndex);
Layer2BatchStored storage _batchStored = batches[_batchId];
_batchStored.batchHash = _batchHash;
_batchStored.parentHash = _batch.parentHash;
_batchStored.batchIndex = _batch.batchIndex;
emit CommitBatch(_batchId, _batchHash, _batch.batchIndex, _batch.parentHash);
}
function revertBatch(bytes32 _batchId) external {
emit RevertBatch(_batchId);
}
function finalizeBatchWithProof(
bytes32 _batchId,
uint256[] memory,
uint256[] memory
) external {
Layer2BatchStored storage _batch = batches[_batchId];
uint256 _batchIndex = _batch.batchIndex;
emit FinalizeBatch(_batchId, _batch.batchHash, _batchIndex, _batch.parentHash);
}
/// @dev Internal function to compute a unique batch id for mapping.
/// @param _batchHash The hash of the batch.
/// @param _parentHash The hash of the parent batch.
/// @param _batchIndex The index of the batch.
/// @return Return the computed batch id.
function _computeBatchId(
bytes32 _batchHash,
bytes32 _parentHash,
uint256 _batchIndex
) internal pure returns (bytes32) {
return keccak256(abi.encode(_batchHash, _parentHash, _batchIndex));
}
}
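For reference, the mock contract's _computeBatchId can be reproduced off-chain: abi.encode of (bytes32, bytes32, uint256) is simply three 32-byte words concatenated. A hedged Go sketch of that computation (it mirrors MockBridgeL1 only; the repo's utils.ComputeBatchID used in the batch proposer test may be implemented differently):

package example

import (
	"math/big"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/crypto"
)

// computeBatchID mirrors keccak256(abi.encode(batchHash, parentHash, batchIndex)).
func computeBatchID(batchHash, parentHash common.Hash, batchIndex *big.Int) common.Hash {
	indexWord := common.BigToHash(batchIndex) // uint256 as a left-padded 32-byte word
	return crypto.Keccak256Hash(batchHash.Bytes(), parentHash.Bytes(), indexWord.Bytes())
}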

View File

@@ -0,0 +1,67 @@
// SPDX-License-Identifier: UNLICENSED
pragma solidity ^0.8.0;
contract MockBridgeL2 {
/*********************************
* Events from L2ScrollMessenger *
*********************************/
event SentMessage(
address indexed target,
address sender,
uint256 value,
uint256 fee,
uint256 deadline,
bytes message,
uint256 messageNonce,
uint256 gasLimit
);
event MessageDropped(bytes32 indexed msgHash);
event RelayedMessage(bytes32 indexed msgHash);
event FailedRelayedMessage(bytes32 indexed msgHash);
/*************
* Variables *
*************/
/// @notice Message nonce, used to avoid relay attack.
uint256 public messageNonce;
/************************************
* Functions from L2ScrollMessenger *
************************************/
function sendMessage(
address _to,
uint256 _fee,
bytes memory _message,
uint256 _gasLimit
) external payable {
// solhint-disable-next-line not-rely-on-time
uint256 _deadline = block.timestamp + 1 days;
uint256 _nonce = messageNonce;
uint256 _value;
unchecked {
_value = msg.value - _fee;
}
bytes32 _msghash = keccak256(abi.encodePacked(msg.sender, _to, _value, _fee, _deadline, _nonce, _message));
emit SentMessage(_to, msg.sender, _value, _fee, _deadline, _message, _nonce, _gasLimit);
messageNonce = _nonce + 1;
}
function relayMessageWithProof(
address _from,
address _to,
uint256 _value,
uint256 _fee,
uint256 _deadline,
uint256 _nonce,
bytes memory _message
) external {
bytes32 _msghash = keccak256(abi.encodePacked(_from, _to, _value, _fee, _deadline, _nonce, _message));
emit RelayedMessage(_msghash);
}
}
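Both mock messengers hash the message as keccak256(abi.encodePacked(sender, target, value, fee, deadline, nonce, message)): 20 bytes per address, 32 bytes per uint256, and the raw message bytes with no length prefix. A hedged Go sketch of that packing (it matches the mock contracts above; the production utils.ComputeMessageHash, whose argument order is fixed in this diff, may differ):

package example

import (
	"math/big"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/crypto"
)

// computeMsgHash mirrors the mock messengers' message hash.
func computeMsgHash(sender, target common.Address, value, fee, deadline, nonce *big.Int, message []byte) common.Hash {
	pad := func(x *big.Int) []byte { return common.LeftPadBytes(x.Bytes(), 32) }
	return crypto.Keccak256Hash(
		sender.Bytes(), target.Bytes(),
		pad(value), pad(fee), pad(deadline), pad(nonce),
		message,
	)
}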

View File

@@ -1,42 +0,0 @@
//SPDX-License-Identifier: UNLICENSED
pragma solidity ^0.8.0;
contract Mock_Bridge {
event SentMessage(
address indexed target,
address sender,
uint256 value,
uint256 fee,
uint256 deadline,
bytes message,
uint256 messageNonce,
uint256 gasLimit
);
/// @notice Message nonce, used to avoid relay attack.
uint256 public messageNonce;
function sendMessage(
address _to,
bytes memory _message,
uint256 _gasLimit
) external payable {
// solhint-disable-next-line not-rely-on-time
uint256 _deadline = block.timestamp + 1 days;
// @todo compute fee
uint256 _fee = 0;
uint256 _nonce = messageNonce;
require(msg.value >= _fee, "cannot pay fee");
uint256 _value;
unchecked {
_value = msg.value - _fee;
}
emit SentMessage(_to, msg.sender, _value, _fee, _deadline, _message, _nonce, _gasLimit);
unchecked {
messageNonce = _nonce + 1;
}
}
}

View File

@@ -12,7 +12,7 @@ import (
"sync/atomic"
"time"
"github.com/scroll-tech/go-ethereum"
geth "github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/math"
@@ -154,18 +154,21 @@ func (s *Sender) NumberOfAccounts() int {
return len(s.auths.accounts)
}
func (s *Sender) getFeeData(auth *bind.TransactOpts, target *common.Address, value *big.Int, data []byte) (*FeeData, error) {
// estimate gas limit
gasLimit, err := s.client.EstimateGas(s.ctx, ethereum.CallMsg{From: auth.From, To: target, Value: value, Data: data})
if err != nil {
return nil, err
func (s *Sender) getFeeData(auth *bind.TransactOpts, target *common.Address, value *big.Int, data []byte, gasLimit uint64) (*FeeData, error) {
if gasLimit == 0 {
// estimate gas limit
var err error
gasLimit, err = s.client.EstimateGas(s.ctx, geth.CallMsg{From: auth.From, To: target, Value: value, Data: data})
if err != nil {
return nil, err
}
gasLimit = gasLimit * 15 / 10 // 50% extra gas to avoid out-of-gas errors
}
gasLimit = gasLimit * 15 / 10 // 50% extra gas to avoid out-of-gas errors
// @todo change it when Scroll enable EIP1559
if s.config.TxType != DynamicFeeTxType {
// estimate gas price
var gasPrice *big.Int
gasPrice, err = s.client.SuggestGasPrice(s.ctx)
gasPrice, err := s.client.SuggestGasPrice(s.ctx)
if err != nil {
return nil, err
}
@@ -189,7 +192,7 @@ func (s *Sender) getFeeData(auth *bind.TransactOpts, target *common.Address, val
}
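With the extra gasLimit parameter, callers pass 0 to let getFeeData call eth_estimateGas (plus the 50% buffer), or supply their own limit as the commitBatch retry does with estimateCommitBatchGas. A hypothetical call-site sketch (the txSender interface is assumed here purely for illustration; the real relayer only retries on the specific "parent batch" revert, not on every error):

package example

import (
	"math/big"

	"github.com/scroll-tech/go-ethereum/common"
)

// txSender is the subset of the sender API used in this sketch.
type txSender interface {
	SendTransaction(ID string, target *common.Address, value *big.Int, data []byte, gasLimit uint64) (common.Hash, error)
}

// sendWithFallback first lets the sender estimate gas (gasLimit = 0) and,
// if that fails, retries once with a caller-supplied manual limit.
func sendWithFallback(s txSender, id string, target common.Address, data []byte, manualGasLimit uint64) (common.Hash, error) {
	if hash, err := s.SendTransaction(id, &target, big.NewInt(0), data, 0); err == nil {
		return hash, nil
	}
	return s.SendTransaction(id, &target, big.NewInt(0), data, manualGasLimit)
}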
// SendTransaction sends a signed L2-to-L1 transaction.
func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.Int, data []byte) (hash common.Hash, err error) {
func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.Int, data []byte, gasLimit uint64) (hash common.Hash, err error) {
// We occupy the ID, in case some other thread calls with the same ID at the same time
if _, loaded := s.pendingTxs.LoadOrStore(ID, nil); loaded {
return common.Hash{}, fmt.Errorf("has the repeat tx ID, ID: %s", ID)
@@ -213,9 +216,10 @@ func (s *Sender) SendTransaction(ID string, target *common.Address, value *big.I
tx *types.Transaction
)
// estimate gas fee
if feeData, err = s.getFeeData(auth, target, value, data); err != nil {
if feeData, err = s.getFeeData(auth, target, value, data, gasLimit); err != nil {
return
}
if tx, err = s.createAndSendTx(auth, feeData, target, value, data, nil); err == nil {
// add pending transaction to queue
pending := &PendingTransaction{

View File

@@ -22,7 +22,7 @@ import (
"scroll-tech/bridge/sender"
)
const TX_BATCH = 50
const TXBatch = 50
var (
privateKeys []*ecdsa.PrivateKey
@@ -84,10 +84,10 @@ func testBatchSender(t *testing.T, batchSize int) {
for idx := 0; idx < newSender.NumberOfAccounts(); idx++ {
index := idx
eg.Go(func() error {
for i := 0; i < TX_BATCH; i++ {
for i := 0; i < TXBatch; i++ {
toAddr := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
id := strconv.Itoa(i + index*1000)
_, err := newSender.SendTransaction(id, &toAddr, big.NewInt(1), nil)
_, err := newSender.SendTransaction(id, &toAddr, big.NewInt(1), nil, 0)
if errors.Is(err, sender.ErrNoAvailableAccount) {
<-time.After(time.Second)
continue
@@ -103,7 +103,7 @@ func testBatchSender(t *testing.T, batchSize int) {
if err := eg.Wait(); err != nil {
t.Error(err)
}
t.Logf("successful send batch txs, batch size: %d, total count: %d", newSender.NumberOfAccounts(), TX_BATCH*newSender.NumberOfAccounts())
t.Logf("successful send batch txs, batch size: %d, total count: %d", newSender.NumberOfAccounts(), TXBatch*newSender.NumberOfAccounts())
// avoid the 10-minute test timeout causing the testcase to panic
after := time.After(80 * time.Second)

bridge/tests/bridge_test.go (new file, 204 lines)
View File

@@ -0,0 +1,204 @@
package tests
import (
"context"
"crypto/ecdsa"
"math/big"
"scroll-tech/common/docker"
"testing"
"scroll-tech/bridge/config"
"scroll-tech/bridge/mock_bridge"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/stretchr/testify/assert"
)
var (
// config
cfg *config.Config
// private key
privateKey *ecdsa.PrivateKey
// docker image instances.
l1gethImg docker.ImgInstance
l2gethImg docker.ImgInstance
dbImg docker.ImgInstance
// clients
l1Client *ethclient.Client
l2Client *ethclient.Client
// auth
l1Auth *bind.TransactOpts
l2Auth *bind.TransactOpts
// l1 messenger contract
l1MessengerInstance *mock_bridge.MockBridgeL1
l1MessengerAddress common.Address
// l1 rollup contract
l1RollupInstance *mock_bridge.MockBridgeL1
l1RollupAddress common.Address
// l2 messenger contract
l2MessengerInstance *mock_bridge.MockBridgeL2
l2MessengerAddress common.Address
)
func setupEnv(t *testing.T) {
var err error
privateKey, err = crypto.ToECDSA(common.FromHex("1212121212121212121212121212121212121212121212121212121212121212"))
assert.NoError(t, err)
messagePrivateKey, err := crypto.ToECDSA(common.FromHex("1212121212121212121212121212121212121212121212121212121212121213"))
assert.NoError(t, err)
rollupPrivateKey, err := crypto.ToECDSA(common.FromHex("1212121212121212121212121212121212121212121212121212121212121214"))
assert.NoError(t, err)
// Load config.
cfg, err = config.NewConfig("../config.json")
assert.NoError(t, err)
cfg.L1Config.Confirmations = 0
cfg.L1Config.RelayerConfig.MessageSenderPrivateKeys = []*ecdsa.PrivateKey{messagePrivateKey}
cfg.L1Config.RelayerConfig.RollupSenderPrivateKeys = []*ecdsa.PrivateKey{rollupPrivateKey}
cfg.L2Config.Confirmations = 0
cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys = []*ecdsa.PrivateKey{messagePrivateKey}
cfg.L2Config.RelayerConfig.RollupSenderPrivateKeys = []*ecdsa.PrivateKey{rollupPrivateKey}
// Create l1geth container.
l1gethImg = docker.NewTestL1Docker(t)
cfg.L2Config.RelayerConfig.SenderConfig.Endpoint = l1gethImg.Endpoint()
cfg.L1Config.Endpoint = l1gethImg.Endpoint()
// Create l2geth container.
l2gethImg = docker.NewTestL2Docker(t)
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = l2gethImg.Endpoint()
cfg.L2Config.Endpoint = l2gethImg.Endpoint()
// Create db container.
dbImg = docker.NewTestDBDocker(t, cfg.DBConfig.DriverName)
cfg.DBConfig.DSN = dbImg.Endpoint()
// Create l1geth and l2geth client.
l1Client, err = ethclient.Dial(cfg.L1Config.Endpoint)
assert.NoError(t, err)
l2Client, err = ethclient.Dial(cfg.L2Config.Endpoint)
assert.NoError(t, err)
// Create l1 and l2 auth
l1Auth = prepareAuth(t, l1Client, privateKey)
l2Auth = prepareAuth(t, l2Client, privateKey)
// send some balance to message and rollup sender
transferEther(t, l1Auth, l1Client, messagePrivateKey)
transferEther(t, l1Auth, l1Client, rollupPrivateKey)
transferEther(t, l2Auth, l2Client, messagePrivateKey)
transferEther(t, l2Auth, l2Client, rollupPrivateKey)
}
func transferEther(t *testing.T, auth *bind.TransactOpts, client *ethclient.Client, privateKey *ecdsa.PrivateKey) {
targetAddress := crypto.PubkeyToAddress(privateKey.PublicKey)
gasPrice, err := client.SuggestGasPrice(context.Background())
assert.NoError(t, err)
gasPrice.Mul(gasPrice, big.NewInt(2))
// Get pending nonce
nonce, err := client.PendingNonceAt(context.Background(), auth.From)
assert.NoError(t, err)
// 200 ether should be enough
value, ok := big.NewInt(0).SetString("0xad78ebc5ac6200000", 0)
assert.Equal(t, ok, true)
tx := types.NewTx(&types.LegacyTx{
Nonce: nonce,
To: &targetAddress,
Value: value,
Gas: 500000,
GasPrice: gasPrice,
})
signedTx, err := auth.Signer(auth.From, tx)
assert.NoError(t, err)
err = client.SendTransaction(context.Background(), signedTx)
assert.NoError(t, err)
receipt, err := bind.WaitMined(context.Background(), client, signedTx)
assert.NoError(t, err)
if receipt.Status != types.ReceiptStatusSuccessful {
t.Fatalf("Call failed")
}
}
func free(t *testing.T) {
if dbImg != nil {
assert.NoError(t, dbImg.Stop())
}
if l1gethImg != nil {
assert.NoError(t, l1gethImg.Stop())
}
if l2gethImg != nil {
assert.NoError(t, l2gethImg.Stop())
}
}
func prepareContracts(t *testing.T) {
var err error
var tx *types.Transaction
// L1 messenger contract
_, tx, l1MessengerInstance, err = mock_bridge.DeployMockBridgeL1(l1Auth, l1Client)
assert.NoError(t, err)
l1MessengerAddress, err = bind.WaitDeployed(context.Background(), l1Client, tx)
assert.NoError(t, err)
// L1 rollup contract
_, tx, l1RollupInstance, err = mock_bridge.DeployMockBridgeL1(l1Auth, l1Client)
assert.NoError(t, err)
l1RollupAddress, err = bind.WaitDeployed(context.Background(), l1Client, tx)
assert.NoError(t, err)
// L2 messenger contract
_, tx, l2MessengerInstance, err = mock_bridge.DeployMockBridgeL2(l2Auth, l2Client)
assert.NoError(t, err)
l2MessengerAddress, err = bind.WaitDeployed(context.Background(), l2Client, tx)
assert.NoError(t, err)
cfg.L1Config.L1MessengerAddress = l1MessengerAddress
cfg.L1Config.RollupContractAddress = l1RollupAddress
cfg.L1Config.RelayerConfig.MessengerContractAddress = l2MessengerAddress
cfg.L2Config.L2MessengerAddress = l2MessengerAddress
cfg.L2Config.RelayerConfig.MessengerContractAddress = l1MessengerAddress
cfg.L2Config.RelayerConfig.RollupContractAddress = l1RollupAddress
}
func prepareAuth(t *testing.T, client *ethclient.Client, privateKey *ecdsa.PrivateKey) *bind.TransactOpts {
chainID, err := client.ChainID(context.Background())
assert.NoError(t, err)
auth, err := bind.NewKeyedTransactorWithChainID(privateKey, chainID)
assert.NoError(t, err)
auth.Value = big.NewInt(0) // in wei
assert.NoError(t, err)
return auth
}
func TestFunction(t *testing.T) {
setupEnv(t)
// l1 rollup: commit/finalize batches and watch rollup events
t.Run("TestCommitBatchAndFinalizeBatch", testCommitBatchAndFinalizeBatch)
// l2 message
t.Run("testRelayL2MessageSucceed", testRelayL2MessageSucceed)
t.Cleanup(func() {
free(t)
})
}

View File

@@ -0,0 +1,175 @@
package tests
import (
"context"
"math/big"
"scroll-tech/database"
"scroll-tech/database/migrate"
"scroll-tech/database/orm"
"sync"
"testing"
"scroll-tech/bridge/l1"
"scroll-tech/bridge/l2"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert"
)
func testRelayL2MessageSucceed(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
var wg sync.WaitGroup
wg.Add(3)
prepareContracts(t)
// Create L2Relayer
l2Cfg := cfg.L2Config
l2Relayer, err := l2.NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer l2Relayer.Stop()
// Create L2Watcher
l2Watcher := l2.NewL2WatcherClient(context.Background(), l2Client, 0, l2Cfg.BatchProposerConfig, l2Cfg.L2MessengerAddress, db)
// Create L1Watcher
l1Cfg := cfg.L1Config
l1Watcher := l1.NewWatcher(context.Background(), l1Client, 0, 0, l1Cfg.L1MessengerAddress, l1Cfg.RollupContractAddress, db)
// send message through l2 messenger contract
nonce, err := l2MessengerInstance.MessageNonce(&bind.CallOpts{})
assert.NoError(t, err)
sendTx, err := l2MessengerInstance.SendMessage(l2Auth, l1Auth.From, big.NewInt(0), common.Hex2Bytes("00112233"), big.NewInt(0))
assert.NoError(t, err)
sendReceipt, err := bind.WaitMined(context.Background(), l2Client, sendTx)
assert.NoError(t, err)
if sendReceipt.Status != types.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}
// l2 watch process events
l2Watcher.FetchContractEvent(sendReceipt.BlockNumber.Uint64())
// check db status
msg, err := db.GetL2MessageByNonce(nonce.Uint64())
assert.NoError(t, err)
assert.Equal(t, msg.Status, orm.MsgPending)
assert.Equal(t, msg.Sender, l2Auth.From.String())
assert.Equal(t, msg.Target, l1Auth.From.String())
// add fake blocks
traces := []*types.BlockTrace{
{
Header: &types.Header{
Number: sendReceipt.BlockNumber,
ParentHash: common.Hash{},
Difficulty: big.NewInt(0),
BaseFee: big.NewInt(0),
},
StorageTrace: &types.StorageTrace{},
},
}
err = db.InsertBlockTraces(traces)
assert.NoError(t, err)
// add fake batch
dbTx, err := db.Beginx()
assert.NoError(t, err)
batchID, err := db.NewBatchInDBTx(dbTx,
&orm.BlockInfo{
Number: traces[0].Header.Number.Uint64(),
Hash: traces[0].Header.Hash().String(),
ParentHash: traces[0].Header.ParentHash.String(),
},
&orm.BlockInfo{
Number: traces[0].Header.Number.Uint64(),
Hash: traces[0].Header.Hash().String(),
ParentHash: traces[0].Header.ParentHash.String(),
},
traces[0].Header.ParentHash.String(), 1, 194676)
assert.NoError(t, err)
err = db.SetBatchIDForBlocksInDBTx(dbTx, []uint64{
traces[0].Header.Number.Uint64(),
traces[0].Header.Number.Uint64()}, batchID)
assert.NoError(t, err)
err = dbTx.Commit()
assert.NoError(t, err)
// add dummy proof
tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
err = db.UpdateProofByID(context.Background(), batchID, tProof, tInstanceCommitments, 100)
assert.NoError(t, err)
err = db.UpdateProvingStatus(batchID, orm.ProvingTaskVerified)
assert.NoError(t, err)
// process pending batch and check status
l2Relayer.ProcessPendingBatches(&wg)
status, err := db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupCommitting, status)
commitTxHash, err := db.GetCommitTxHash(batchID)
assert.NoError(t, err)
assert.Equal(t, true, commitTxHash.Valid)
commitTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(commitTxHash.String))
assert.NoError(t, err)
commitTxReceipt, err := bind.WaitMined(context.Background(), l1Client, commitTx)
assert.NoError(t, err)
assert.Equal(t, len(commitTxReceipt.Logs), 1)
// fetch CommitBatch rollup events
err = l1Watcher.FetchContractEvent(commitTxReceipt.BlockNumber.Uint64())
assert.NoError(t, err)
status, err = db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupCommitted, status)
// process committed batch and check status
l2Relayer.ProcessCommittedBatches(&wg)
status, err = db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupFinalizing, status)
finalizeTxHash, err := db.GetFinalizeTxHash(batchID)
assert.NoError(t, err)
assert.Equal(t, true, finalizeTxHash.Valid)
finalizeTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(finalizeTxHash.String))
assert.NoError(t, err)
finalizeTxReceipt, err := bind.WaitMined(context.Background(), l1Client, finalizeTx)
assert.NoError(t, err)
assert.Equal(t, len(finalizeTxReceipt.Logs), 1)
// fetch FinalizeBatch events
err = l1Watcher.FetchContractEvent(finalizeTxReceipt.BlockNumber.Uint64())
assert.NoError(t, err)
status, err = db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupFinalized, status)
// process l2 messages
l2Relayer.ProcessSavedEvents(&wg)
msg, err = db.GetL2MessageByNonce(nonce.Uint64())
assert.NoError(t, err)
assert.Equal(t, msg.Status, orm.MsgSubmitted)
relayTxHash, err := db.GetRelayL2MessageTxHash(nonce.Uint64())
assert.NoError(t, err)
assert.Equal(t, true, relayTxHash.Valid)
relayTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(relayTxHash.String))
assert.NoError(t, err)
relayTxReceipt, err := bind.WaitMined(context.Background(), l1Client, relayTx)
assert.NoError(t, err)
assert.Equal(t, len(relayTxReceipt.Logs), 1)
// fetch message relayed events
err = l1Watcher.FetchContractEvent(relayTxReceipt.BlockNumber.Uint64())
assert.NoError(t, err)
msg, err = db.GetL2MessageByNonce(nonce.Uint64())
assert.NoError(t, err)
assert.Equal(t, msg.Status, orm.MsgConfirmed)
}

138
bridge/tests/rollup_test.go Normal file
View File

@@ -0,0 +1,138 @@
package tests
import (
"context"
"math/big"
"scroll-tech/database"
"scroll-tech/database/migrate"
"scroll-tech/database/orm"
"sync"
"testing"
"scroll-tech/bridge/l1"
"scroll-tech/bridge/l2"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert"
)
func testCommitBatchAndFinalizeBatch(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
prepareContracts(t)
// Create L2Relayer
l2Cfg := cfg.L2Config
l2Relayer, err := l2.NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer l2Relayer.Stop()
// Create L1Watcher
l1Cfg := cfg.L1Config
l1Watcher := l1.NewWatcher(context.Background(), l1Client, 0, 0, l1Cfg.L1MessengerAddress, l1Cfg.RollupContractAddress, db)
// add some blocks to db
var traces []*types.BlockTrace
var parentHash common.Hash
for i := 1; i <= 10; i++ {
header := types.Header{
Number: big.NewInt(int64(i)),
ParentHash: parentHash,
Difficulty: big.NewInt(0),
BaseFee: big.NewInt(0),
}
traces = append(traces, &types.BlockTrace{
Header: &header,
StorageTrace: &types.StorageTrace{},
})
parentHash = header.Hash()
}
err = db.InsertBlockTraces(traces)
assert.NoError(t, err)
// add one batch to db
dbTx, err := db.Beginx()
assert.NoError(t, err)
batchID, err := db.NewBatchInDBTx(dbTx,
&orm.BlockInfo{
Number: traces[0].Header.Number.Uint64(),
Hash: traces[0].Header.Hash().String(),
ParentHash: traces[0].Header.ParentHash.String(),
},
&orm.BlockInfo{
Number: traces[1].Header.Number.Uint64(),
Hash: traces[1].Header.Hash().String(),
ParentHash: traces[1].Header.ParentHash.String(),
},
traces[0].Header.ParentHash.String(), 1, 194676) // parentHash & totalTxNum & totalL2Gas don't really matter here
assert.NoError(t, err)
err = db.SetBatchIDForBlocksInDBTx(dbTx, []uint64{
traces[0].Header.Number.Uint64(),
traces[1].Header.Number.Uint64()}, batchID)
assert.NoError(t, err)
err = dbTx.Commit()
assert.NoError(t, err)
var wg = sync.WaitGroup{}
wg.Add(1)
// process pending batch and check status
l2Relayer.ProcessPendingBatches(&wg)
wg.Wait()
status, err := db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupCommitting, status)
commitTxHash, err := db.GetCommitTxHash(batchID)
assert.NoError(t, err)
assert.Equal(t, true, commitTxHash.Valid)
commitTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(commitTxHash.String))
assert.NoError(t, err)
commitTxReceipt, err := bind.WaitMined(context.Background(), l1Client, commitTx)
assert.NoError(t, err)
assert.Equal(t, len(commitTxReceipt.Logs), 1)
// fetch rollup events
err = l1Watcher.FetchContractEvent(commitTxReceipt.BlockNumber.Uint64())
assert.NoError(t, err)
status, err = db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupCommitted, status)
// add dummy proof
tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
err = db.UpdateProofByID(context.Background(), batchID, tProof, tInstanceCommitments, 100)
assert.NoError(t, err)
err = db.UpdateProvingStatus(batchID, orm.ProvingTaskVerified)
assert.NoError(t, err)
wg.Add(1)
// process committed batch and check status
l2Relayer.ProcessCommittedBatches(&wg)
wg.Wait()
status, err = db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupFinalizing, status)
finalizeTxHash, err := db.GetFinalizeTxHash(batchID)
assert.NoError(t, err)
assert.Equal(t, true, finalizeTxHash.Valid)
finalizeTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(finalizeTxHash.String))
assert.NoError(t, err)
finalizeTxReceipt, err := bind.WaitMined(context.Background(), l1Client, finalizeTx)
assert.NoError(t, err)
assert.Equal(t, len(finalizeTxReceipt.Logs), 1)
// fetch rollup events
err = l1Watcher.FetchContractEvent(finalizeTxReceipt.BlockNumber.Uint64())
assert.NoError(t, err)
status, err = db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupFinalized, status)
}

View File

@@ -20,8 +20,8 @@ func encodePacked(input ...[]byte) []byte {
// ComputeMessageHash compute the message hash
func ComputeMessageHash(
target common.Address,
sender common.Address,
target common.Address,
value *big.Int,
fee *big.Int,
deadline *big.Int,
@@ -29,8 +29,8 @@ func ComputeMessageHash(
messageNonce *big.Int,
) common.Hash {
packed := encodePacked(
target.Bytes(),
sender.Bytes(),
target.Bytes(),
math.U256Bytes(value),
math.U256Bytes(fee),
math.U256Bytes(deadline),

View File

@@ -7,6 +7,7 @@ import (
"scroll-tech/bridge/utils"
"github.com/scroll-tech/go-ethereum/common"
"github.com/stretchr/testify/assert"
)
func TestKeccak2(t *testing.T) {
@@ -28,15 +29,13 @@ func TestKeccak2(t *testing.T) {
func TestComputeMessageHash(t *testing.T) {
hash := utils.ComputeMessageHash(
common.HexToAddress("0xdafea492d9c6733ae3d56b7ed1adb60692c98bc5"),
common.HexToAddress("0xeafea492d9c6733ae3d56b7ed1adb60692c98bf7"),
big.NewInt(1),
big.NewInt(2),
big.NewInt(1234567),
common.Hex2Bytes("0011223344"),
big.NewInt(3),
common.HexToAddress("0xd7227113b92e537aeda220d5a2f201b836e5879d"),
common.HexToAddress("0x47c02b023b6787ef4e503df42bbb1a94f451a1c0"),
big.NewInt(5000000000000000),
big.NewInt(0),
big.NewInt(1674204924),
common.Hex2Bytes("8eaac8a30000000000000000000000007138b17fc82d7e954b3bd2f98d8166d03e5e569b0000000000000000000000007138b17fc82d7e954b3bd2f98d8166d03e5e569b0000000000000000000000000000000000000000000000000011c37937e0800000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000"),
big.NewInt(30706),
)
if hash != common.HexToHash("0x58c9a5abfd2a558bb6a6fd5192b36fe9325d98763bafd3a51a1ea28a5d0b990b") {
t.Fatalf("Invalid ComputeMessageHash, want %s, got %s", "0x58c9a5abfd2a558bb6a6fd5192b36fe9325d98763bafd3a51a1ea28a5d0b990b", hash.Hex())
}
assert.Equal(t, hash.String(), "0x920e59f62ca89a0f481d44961c55d299dd20c575693692d61fdf3ca579d8edf3")
}
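Because the fix only swaps the two address parameters, it is easy to misread which argument is which in the updated call. A minimal sketch follows, assuming the corrected order is (sender, target, value, fee, deadline, message, nonce); the values mirror the updated test, with a short stand-in payload, and none of this is part of the commit itself:
// Illustrative only: the assumed argument order of the fixed ComputeMessageHash.
hash := utils.ComputeMessageHash(
	common.HexToAddress("0xd7227113b92e537aeda220d5a2f201b836e5879d"), // sender (assumed first)
	common.HexToAddress("0x47c02b023b6787ef4e503df42bbb1a94f451a1c0"), // target (assumed second)
	big.NewInt(5000000000000000), // value in wei
	big.NewInt(0),                // fee
	big.NewInt(1674204924),       // deadline (unix timestamp)
	common.Hex2Bytes("00112233"), // message payload (stand-in for the full calldata)
	big.NewInt(30706),            // message nonce
)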

View File

@@ -179,6 +179,13 @@ linters:
- depguard
- gocyclo
- unparam
- exportloopref
- sqlclosecheck
- rowserrcheck
- durationcheck
- bidichk
- typecheck
- unused
enable-all: false
disable:
@@ -200,16 +207,7 @@ issues:
# Exclude some linters from running on tests files.
- path: _test\.go
linters:
- gocyclo
- errcheck
- dupl
- gosec
# Exclude known linters from partially hard-vendored code,
# which is impossible to exclude via "nolint" comments.
- path: internal/hmac/
text: "weak cryptographic primitive"
linters:
- gosec
# Exclude some staticcheck messages
@@ -217,18 +215,6 @@ issues:
- staticcheck
text: "SA9003:"
- linters:
- golint
text: "package comment should be of the form"
- linters:
- golint
text: "don't use ALL_CAPS in Go names;"
- linters:
- golint
text: "don't use underscores in Go names;"
# Exclude lll issues for long lines with go:generate
- linters:
- lll

View File

@@ -8,6 +8,7 @@ COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./roller/go.* ./roller/
COPY ./tests/integration-test/go.* ./tests/integration-test/
RUN go mod download -x
# Build bridge

View File

@@ -1,5 +1,5 @@
assets/
docs/
integration-test/
l2geth/
rpc-gateway/
*target/*

View File

@@ -1,5 +1,5 @@
# Build libzkp dependency
FROM scrolltech/go-rust-builder:go-1.17-rust-nightly-2022-08-23 as chef
FROM scrolltech/go-rust-builder:go-1.18-rust-nightly-2022-08-23 as chef
WORKDIR app
FROM chef as planner
@@ -24,6 +24,7 @@ COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./roller/go.* ./roller/
COPY ./tests/integration-test/go.* ./tests/integration-test/
RUN go mod download -x

View File

@@ -1,6 +1,6 @@
assets/
contracts/
docs/
integration-test/
l2geth/
rpc-gateway/
*target/*

View File

@@ -8,6 +8,7 @@ COPY ./common/go.* ./common/
COPY ./coordinator/go.* ./coordinator/
COPY ./database/go.* ./database/
COPY ./roller/go.* ./roller/
COPY ./tests/integration-test/go.* ./tests/integration-test/
RUN go mod download -x
# Build db_cli

View File

@@ -1,6 +1,6 @@
assets/
contracts/
docs/
integration-test/
l2geth/
rpc-gateway/
*target/*

View File

@@ -0,0 +1,14 @@
#!/bin/bash
set -uex
${GOROOT}/bin/bin/gocover-cobertura < coverage.bridge.txt > coverage.bridge.xml
${GOROOT}/bin/bin/gocover-cobertura < coverage.db.txt > coverage.db.xml
${GOROOT}/bin/bin/gocover-cobertura < coverage.common.txt > coverage.common.xml
${GOROOT}/bin/bin/gocover-cobertura < coverage.coordinator.txt > coverage.coordinator.xml
${GOROOT}/bin/bin/gocover-cobertura < coverage.integration.txt > coverage.integration.xml
npx cobertura-merge -o cobertura.xml \
package1=coverage.bridge.xml \
package2=coverage.db.xml \
package3=coverage.common.xml \
package4=coverage.coordinator.xml \
package5=coverage.integration.xml

View File

@@ -0,0 +1,63 @@
imagePrefix = 'scrolltech'
credentialDocker = 'dockerhub'
pipeline {
agent any
options {
timeout (20)
}
tools {
go 'go-1.18'
nodejs "nodejs"
}
environment {
GO111MODULE = 'on'
PATH="/home/ubuntu/.cargo/bin:$PATH"
// LOG_DOCKER = 'true'
}
stages {
stage('Tag') {
steps {
script {
TAGNAME = sh(returnStdout: true, script: 'git tag -l --points-at HEAD')
sh "echo ${TAGNAME}"
// ...
}
}
}
stage('Build') {
environment {
// Extract the username and password of our credentials into "DOCKER_CREDENTIALS_USR" and "DOCKER_CREDENTIALS_PSW".
// (NOTE 1: DOCKER_CREDENTIALS will be set to "your_username:your_password".)
// The new variables will always be YOUR_VARIABLE_NAME + _USR and _PSW.
// (NOTE 2: You can't print credentials in the pipeline for security reasons.)
DOCKER_CREDENTIALS = credentials('dockerhub')
}
steps {
withCredentials([usernamePassword(credentialsId: "${credentialDocker}", passwordVariable: 'dockerPassword', usernameVariable: 'dockerUser')]) {
// Use a scripted pipeline.
script {
stage('Push image') {
if (TAGNAME == ""){
return;
}
sh "docker login --username=${dockerUser} --password=${dockerPassword}"
sh "make -C bridge docker"
sh "make -C coordinator docker"
sh "docker tag scrolltech/bridge:latest scrolltech/bridge:${TAGNAME}"
sh "docker tag scrolltech/coordinator:latest scrolltech/coordinator:${TAGNAME}"
sh "docker push scrolltech/bridge:${TAGNAME}"
sh "docker push scrolltech/coordinator:${TAGNAME}"
}
}
}
}
}
}
post {
always {
cleanWs()
slackSend(message: "${JOB_BASE_NAME} ${GIT_COMMIT} #${TAGNAME} Tag build ${currentBuild.result}")
}
}
}

89
common/cmd/cmd.go Normal file
View File

@@ -0,0 +1,89 @@
package cmd
import (
"os"
"os/exec"
"strings"
"sync"
"testing"
cmap "github.com/orcaman/concurrent-map"
)
var verbose bool
func init() {
v := os.Getenv("LOG_DOCKER")
if v == "true" || v == "TRUE" {
verbose = true
}
}
type checkFunc func(buf string)
// Cmd struct
type Cmd struct {
*testing.T
name string
args []string
mu sync.Mutex
cmd *exec.Cmd
checkFuncs cmap.ConcurrentMap //map[string]checkFunc
//stdout bytes.Buffer
Err error
}
// NewCmd creates a Cmd instance.
func NewCmd(t *testing.T, name string, args ...string) *Cmd {
return &Cmd{
T: t,
checkFuncs: cmap.New(),
name: name,
args: args,
}
}
// RegistFunc registers a check func.
func (t *Cmd) RegistFunc(key string, check checkFunc) {
t.checkFuncs.Set(key, check)
}
// UnRegistFunc unregister check func
func (t *Cmd) UnRegistFunc(key string) {
t.checkFuncs.Pop(key)
}
func (t *Cmd) runCmd() {
cmd := exec.Command(t.args[0], t.args[1:]...) //nolint:gosec
cmd.Stdout = t
cmd.Stderr = t
_ = cmd.Run()
}
// RunCmd runs the command; it runs in a separate goroutine when parallel is true.
func (t *Cmd) RunCmd(parallel bool) {
t.Log("cmd: ", t.args)
if parallel {
go t.runCmd()
} else {
t.runCmd()
}
}
func (t *Cmd) Write(data []byte) (int, error) {
out := string(data)
if verbose {
t.Logf("%s: %v", t.name, out)
} else if strings.Contains(out, "error") || strings.Contains(out, "warning") {
t.Logf("%s: %v", t.name, out)
}
go t.checkFuncs.IterCb(func(_ string, value interface{}) {
check := value.(checkFunc)
check(out)
})
return len(data), nil
}

114
common/cmd/cmd_app.go Normal file
View File

@@ -0,0 +1,114 @@
package cmd
import (
"fmt"
"os"
"os/exec"
"strings"
"time"
"github.com/docker/docker/pkg/reexec"
"github.com/stretchr/testify/assert"
)
// RunApp exec's the current binary using name as argv[0] which will trigger the
// reexec init function for that name (e.g. "geth-test" in cmd/geth/run_test.go)
func (t *Cmd) RunApp(waitResult func() bool) {
t.Log("cmd: ", append([]string{t.name}, t.args...))
cmd := &exec.Cmd{
Path: reexec.Self(),
Args: append([]string{t.name}, t.args...),
Stderr: t,
Stdout: t,
}
if waitResult != nil {
go func() {
_ = cmd.Run()
}()
waitResult()
} else {
_ = cmd.Run()
}
t.mu.Lock()
t.cmd = cmd
t.mu.Unlock()
}
// WaitExit waits until the process exits.
func (t *Cmd) WaitExit() {
// Wait until all check funcs have finished or the test has failed.
for !(t.Failed() || t.checkFuncs.IsEmpty()) {
<-time.After(time.Millisecond * 500)
}
// Send interrupt signal.
t.mu.Lock()
_ = t.cmd.Process.Signal(os.Interrupt)
t.mu.Unlock()
}
// Interrupt sends an interrupt signal to the process.
func (t *Cmd) Interrupt() {
t.mu.Lock()
t.Err = t.cmd.Process.Signal(os.Interrupt)
t.mu.Unlock()
}
// WaitResult returns true if the keyword appears in the output before the timeout.
func (t *Cmd) WaitResult(timeout time.Duration, keyword string) bool {
if keyword == "" {
return false
}
okCh := make(chan struct{}, 1)
t.RegistFunc(keyword, func(buf string) {
if strings.Contains(buf, keyword) {
select {
case okCh <- struct{}{}:
default:
return
}
}
})
defer t.UnRegistFunc(keyword)
select {
case <-okCh:
return true
case <-time.After(timeout):
assert.Fail(t, fmt.Sprintf("didn't get the desired result before timeout, keyword: %s", keyword))
}
return false
}
// ExpectWithTimeout waits for the keyword to appear within the given timeout.
func (t *Cmd) ExpectWithTimeout(parallel bool, timeout time.Duration, keyword string) {
if keyword == "" {
return
}
okCh := make(chan struct{}, 1)
t.RegistFunc(keyword, func(buf string) {
if strings.Contains(buf, keyword) {
select {
case okCh <- struct{}{}:
default:
return
}
}
})
waitResult := func() {
defer t.UnRegistFunc(keyword)
select {
case <-okCh:
return
case <-time.After(timeout):
assert.Fail(t, fmt.Sprintf("didn't get the desired result before timeout, keyword: %s", keyword))
}
}
if parallel {
go waitResult()
} else {
waitResult()
}
}
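A rough usage sketch for these helpers, assuming a test wants to start an app binary and block until a startup line is logged (the app name, flags and keyword below are illustrative, not taken from this changeset):
// Hypothetical example: run a registered app and wait for a startup log line.
app := cmd.NewCmd(t, "bridge-test", "--config", "./config.json")
app.RunApp(func() bool {
	// Block until the expected keyword shows up in stdout/stderr or the timeout fires.
	return app.WaitResult(20*time.Second, "Start bridge successfully")
})
// Interrupt the process once every registered check func has been unregistered.
defer app.WaitExit()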

42
common/cmd/cmd_test.go Normal file
View File

@@ -0,0 +1,42 @@
package cmd_test
import (
"fmt"
"strings"
"testing"
"time"
"github.com/stretchr/testify/assert"
"scroll-tech/common/cmd"
)
func TestCmd(t *testing.T) {
app := cmd.NewCmd(t, "curTime", "date", "+%Y-%m-%d")
tm := time.Now()
curTime := fmt.Sprintf("%d-%02d-%02d", tm.Year(), tm.Month(), tm.Day())
okCh := make(chan struct{}, 1)
app.RegistFunc(curTime, func(buf string) {
if strings.Contains(buf, curTime) {
select {
case okCh <- struct{}{}:
default:
return
}
}
})
defer app.UnRegistFunc(curTime)
// Run cmd.
app.RunCmd(true)
// Wait result.
select {
case <-okCh:
return
case <-time.After(time.Second):
assert.Fail(t, fmt.Sprintf("didn't get the desired result before timeout, keyword: %s", curTime))
}
}

View File

@@ -1,98 +0,0 @@
package docker
import (
"errors"
"os"
"os/exec"
"strings"
"sync"
"testing"
)
var verbose bool
func init() {
v := os.Getenv("LOG_DOCKER")
if v == "true" || v == "TRUE" {
verbose = true
}
}
type checkFunc func(buf string)
// Cmd struct
type Cmd struct {
*testing.T
checkFuncs sync.Map //map[string]checkFunc
//stdout bytes.Buffer
errMsg chan error
}
// NewCmd create Cmd instance.
func NewCmd(t *testing.T) *Cmd {
cmd := &Cmd{
T: t,
//stdout: bytes.Buffer{},
errMsg: make(chan error, 2),
}
// Handle panic.
cmd.RegistFunc("panic", func(buf string) {
if strings.Contains(buf, "panic") {
cmd.errMsg <- errors.New(buf)
}
})
return cmd
}
// RegistFunc register check func
func (t *Cmd) RegistFunc(key string, check checkFunc) {
t.checkFuncs.Store(key, check)
}
// UnRegistFunc unregister check func
func (t *Cmd) UnRegistFunc(key string) {
if _, ok := t.checkFuncs.Load(key); ok {
t.checkFuncs.Delete(key)
}
}
// RunCmd parallel running when parallel is true.
func (t *Cmd) RunCmd(args []string, parallel bool) {
t.Log("RunCmd cmd", args)
if parallel {
go t.runCmd(args)
} else {
t.runCmd(args)
}
}
// ErrMsg return error output channel
func (t *Cmd) ErrMsg() <-chan error {
return t.errMsg
}
func (t *Cmd) Write(data []byte) (int, error) {
out := string(data)
if verbose {
t.Logf(out)
} else if strings.Contains(out, "error") || strings.Contains(out, "warning") {
t.Logf(out)
}
go func(content string) {
t.checkFuncs.Range(func(key, value any) bool {
check := value.(checkFunc)
check(content)
return true
})
}(out)
return len(data), nil
}
func (t *Cmd) runCmd(args []string) {
cmd := exec.Command(args[0], args[1:]...) //nolint:gosec
cmd.Stdout = t
cmd.Stderr = t
_ = cmd.Run()
}

View File

@@ -8,6 +8,9 @@ import (
"time"
"github.com/docker/docker/api/types"
"scroll-tech/common/cmd"
"scroll-tech/common/utils"
)
// ImgDB the postgres image manager.
@@ -21,19 +24,20 @@ type ImgDB struct {
password string
running bool
*Cmd
cmd *cmd.Cmd
}
// NewImgDB return postgres db img instance.
func NewImgDB(t *testing.T, image, password, dbName string, port int) ImgInstance {
return &ImgDB{
img := &ImgDB{
image: image,
name: fmt.Sprintf("%s-%s_%d", image, dbName, port),
password: password,
dbName: dbName,
port: port,
Cmd: NewCmd(t),
}
img.cmd = cmd.NewCmd(t, img.name, img.prepare()...)
return img
}
// Start postgres db container.
@@ -42,7 +46,7 @@ func (i *ImgDB) Start() error {
if id != "" {
return fmt.Errorf("container already exist, name: %s", i.name)
}
i.Cmd.RunCmd(i.prepare(), true)
i.cmd.RunCmd(true)
i.running = i.isOk()
if !i.running {
_ = i.Stop()
@@ -59,14 +63,13 @@ func (i *ImgDB) Stop() error {
i.running = false
ctx := context.Background()
// check if container is running, stop the running container.
id := GetContainerID(i.name)
if id != "" {
timeout := time.Second * 3
if err := cli.ContainerStop(ctx, id, &timeout); err != nil {
return err
}
i.id = id
// stop the running container.
if i.id == "" {
i.id = GetContainerID(i.name)
}
timeout := time.Second * 3
if err := cli.ContainerStop(ctx, i.id, &timeout); err != nil {
return err
}
// remove the stopped container.
return cli.ContainerRemove(ctx, i.id, types.ContainerRemoveOptions{})
@@ -94,7 +97,7 @@ func (i *ImgDB) prepare() []string {
func (i *ImgDB) isOk() bool {
keyword := "database system is ready to accept connections"
okCh := make(chan struct{}, 1)
i.RegistFunc(keyword, func(buf string) {
i.cmd.RegistFunc(keyword, func(buf string) {
if strings.Contains(buf, keyword) {
select {
case okCh <- struct{}{}:
@@ -103,14 +106,16 @@ func (i *ImgDB) isOk() bool {
}
}
})
defer i.UnRegistFunc(keyword)
defer i.cmd.UnRegistFunc(keyword)
select {
case <-okCh:
time.Sleep(time.Millisecond * 1500)
i.id = GetContainerID(i.name)
utils.TryTimes(3, func() bool {
i.id = GetContainerID(i.name)
return i.id != ""
})
return i.id != ""
case <-time.NewTimer(time.Second * 10).C:
case <-time.After(time.Second * 20):
return false
}
}

View File

@@ -9,6 +9,9 @@ import (
"time"
"github.com/docker/docker/api/types"
"scroll-tech/common/cmd"
"scroll-tech/common/utils"
)
// ImgGeth the geth image manager include l1geth and l2geth.
@@ -23,20 +26,21 @@ type ImgGeth struct {
wsPort int
running bool
*Cmd
cmd *cmd.Cmd
}
// NewImgGeth return geth img instance.
func NewImgGeth(t *testing.T, image, volume, ipc string, hPort, wPort int) ImgInstance {
return &ImgGeth{
img := &ImgGeth{
image: image,
name: fmt.Sprintf("%s-%d", image, time.Now().Nanosecond()),
volume: volume,
ipcPath: ipc,
httpPort: hPort,
wsPort: wPort,
Cmd: NewCmd(t),
}
img.cmd = cmd.NewCmd(t, img.name, img.prepare()...)
return img
}
// Start run image and check if it is running healthily.
@@ -45,7 +49,7 @@ func (i *ImgGeth) Start() error {
if id != "" {
return fmt.Errorf("container already exist, name: %s", i.name)
}
i.Cmd.RunCmd(i.prepare(), true)
i.cmd.RunCmd(true)
i.running = i.isOk()
if !i.running {
_ = i.Stop()
@@ -72,7 +76,7 @@ func (i *ImgGeth) Endpoint() string {
func (i *ImgGeth) isOk() bool {
keyword := "WebSocket enabled"
okCh := make(chan struct{}, 1)
i.RegistFunc(keyword, func(buf string) {
i.cmd.RegistFunc(keyword, func(buf string) {
if strings.Contains(buf, keyword) {
select {
case okCh <- struct{}{}:
@@ -81,13 +85,16 @@ func (i *ImgGeth) isOk() bool {
}
}
})
defer i.UnRegistFunc(keyword)
defer i.cmd.UnRegistFunc(keyword)
select {
case <-okCh:
i.id = GetContainerID(i.name)
utils.TryTimes(3, func() bool {
i.id = GetContainerID(i.name)
return i.id != ""
})
return i.id != ""
case <-time.NewTimer(time.Second * 10).C:
case <-time.After(time.Second * 10):
return false
}
}

View File

@@ -10,12 +10,19 @@ import (
"github.com/stretchr/testify/assert"
)
func TestL1Geth(t *testing.T) {
func TestDocker(t *testing.T) {
t.Parallel()
t.Run("testL1Geth", testL1Geth)
t.Run("testL2Geth", testL2Geth)
t.Run("testDB", testDB)
}
func testL1Geth(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
img := NewImgGeth(t, "scroll_l1geth", "", "", 8535, 0)
assert.NoError(t, img.Start())
img := NewTestL1Docker(t)
defer img.Stop()
client, err := ethclient.Dial(img.Endpoint())
@@ -26,12 +33,11 @@ func TestL1Geth(t *testing.T) {
t.Logf("chainId: %s", chainID.String())
}
func TestL2Geth(t *testing.T) {
func testL2Geth(t *testing.T) {
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
img := NewImgGeth(t, "scroll_l2geth", "", "", 8535, 0)
assert.NoError(t, img.Start())
img := NewTestL2Docker(t)
defer img.Stop()
client, err := ethclient.Dial(img.Endpoint())
@@ -42,7 +48,7 @@ func TestL2Geth(t *testing.T) {
t.Logf("chainId: %s", chainID.String())
}
func TestDB(t *testing.T) {
func testDB(t *testing.T) {
driverName := "postgres"
dbImg := NewTestDBDocker(t, driverName)
defer dbImg.Stop()

View File

@@ -36,7 +36,7 @@ func GetContainerID(name string) string {
Filters: filter,
})
if len(lst) > 0 {
return lst[0].Names[0]
return lst[0].ID
}
return ""
}

View File

@@ -1,11 +1,16 @@
package docker
import (
"context"
"crypto/rand"
"math/big"
"testing"
"github.com/jmoiron/sqlx"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/stretchr/testify/assert"
"scroll-tech/common/utils"
)
var (
@@ -19,6 +24,18 @@ func NewTestL1Docker(t *testing.T) ImgInstance {
id, _ := rand.Int(rand.Reader, big.NewInt(2000))
imgL1geth := NewImgGeth(t, "scroll_l1geth", "", "", 0, l1StartPort+int(id.Int64()))
assert.NoError(t, imgL1geth.Start())
// try up to 3 times to get the chainID until the node is ready.
utils.TryTimes(3, func() bool {
client, _ := ethclient.Dial(imgL1geth.Endpoint())
if client != nil {
if _, err := client.ChainID(context.Background()); err == nil {
return true
}
}
return false
})
return imgL1geth
}
@@ -27,6 +44,18 @@ func NewTestL2Docker(t *testing.T) ImgInstance {
id, _ := rand.Int(rand.Reader, big.NewInt(2000))
imgL2geth := NewImgGeth(t, "scroll_l2geth", "", "", 0, l2StartPort+int(id.Int64()))
assert.NoError(t, imgL2geth.Start())
// try up to 3 times to get the chainID until the node is ready.
utils.TryTimes(3, func() bool {
client, _ := ethclient.Dial(imgL2geth.Endpoint())
if client != nil {
if _, err := client.ChainID(context.Background()); err == nil {
return true
}
}
return false
})
return imgL2geth
}
@@ -35,5 +64,15 @@ func NewTestDBDocker(t *testing.T, driverName string) ImgInstance {
id, _ := rand.Int(rand.Reader, big.NewInt(2000))
imgDB := NewImgDB(t, driverName, "123456", "test_db", dbStartPort+int(id.Int64()))
assert.NoError(t, imgDB.Start())
// try 5 times until the db is ready.
utils.TryTimes(5, func() bool {
db, _ := sqlx.Open(driverName, imgDB.Endpoint())
if db != nil {
return db.Ping() == nil
}
return false
})
return imgDB
}

View File

@@ -3,15 +3,15 @@ module scroll-tech/common
go 1.18
require (
github.com/docker/docker v20.10.17+incompatible
github.com/docker/docker v20.10.21+incompatible
github.com/jmoiron/sqlx v1.3.5
github.com/lib/pq v1.10.6
github.com/mattn/go-colorable v0.1.8
github.com/mattn/go-isatty v0.0.14
github.com/scroll-tech/go-ethereum v1.10.14-0.20221213034543-78c1f57fcfea
github.com/orcaman/concurrent-map v1.0.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81
github.com/stretchr/testify v1.8.0
github.com/urfave/cli/v2 v2.10.2
golang.org/x/sync v0.1.0
)
require (
@@ -77,11 +77,12 @@ require (
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/crypto v0.4.0 // indirect
golang.org/x/crypto v0.5.0 // indirect
golang.org/x/mod v0.7.0 // indirect
golang.org/x/net v0.3.0 // indirect
golang.org/x/sys v0.3.0 // indirect
golang.org/x/text v0.5.0 // indirect
golang.org/x/net v0.5.0 // indirect
golang.org/x/sync v0.1.0 // indirect
golang.org/x/sys v0.4.0 // indirect
golang.org/x/text v0.6.0 // indirect
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba // indirect
golang.org/x/tools v0.3.0 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect

View File

@@ -112,8 +112,8 @@ github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwu
github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.17+incompatible h1:JYCuMrWaVNophQTOrMMoSwudOVEfcegoZZrleKc1xwE=
github.com/docker/docker v20.10.17+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.21+incompatible h1:UTLdBmHk3bEY+w8qeO5KttOhy6OmXWsl/FEet9Uswog=
github.com/docker/docker v20.10.21+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/go-connections v0.4.0 h1:El9xVISelRB7BuFusrZozjnkIM5YnzCViNKohAFqRJQ=
github.com/docker/go-connections v0.4.0/go.mod h1:Gbd7IOopHjR8Iph03tsViu4nIes5XhDvyHbTtUxmeec=
github.com/docker/go-units v0.5.0 h1:69rxXcBk27SvSaaxTtLh/8llcHD8vYHT7WSdRZ/jvr4=
@@ -364,6 +364,8 @@ github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFSt
github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/orcaman/concurrent-map v1.0.0 h1:I/2A2XPCb4IuQWcQhBhSwGfiuybl/J0ev9HDbW65HOY=
github.com/orcaman/concurrent-map v1.0.0/go.mod h1:Lu3tH6HLW3feq74c2GC+jIMS/K2CFcDWnWD9XkenwhI=
github.com/paulbellamy/ratecounter v0.2.0/go.mod h1:Hfx1hDpSGoqxkVVpBi/IlYD7kChlfo5C6hzIHwPqfFE=
github.com/peterh/liner v1.0.1-0.20180619022028-8c1271fcf47f/go.mod h1:xIteQHvHuaLYG9IFj6mSxM0fCKrs34IrEQUhOYuGPHc=
github.com/peterh/liner v1.1.1-0.20190123174540-a2c9a5303de7 h1:oYW+YCJ1pachXTQmzR3rNLYGGz4g/UgFcjb28p/viDM=
@@ -402,8 +404,8 @@ github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221213034543-78c1f57fcfea h1:KYlmCH4cDMGxQzaYoSK8+DF53POGpAmnzusAtBWzEjA=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221213034543-78c1f57fcfea/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81 h1:Gm18RZ9WTR2Dupumr60E2m1Noe+l9/lITt6iRyxxZoc=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/zktrie v0.3.0/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
github.com/scroll-tech/zktrie v0.3.1 h1:HlR+fMBdjXX1/7cUMqpUgGEhGy/3vN1JpwQ0ovg/Ys8=
github.com/scroll-tech/zktrie v0.3.1/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
@@ -480,8 +482,8 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8=
golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80=
golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -539,8 +541,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.3.0 h1:VWL6FNY2bEEmsGVKabSlHu5Irp34xmMRoqb/9lF9lxk=
golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw=
golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -604,8 +606,8 @@ golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -617,8 +619,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM=
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=

View File

@@ -1380,7 +1380,7 @@ dependencies = [
[[package]]
name = "halo2-mpt-circuits"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/mpt-circuit.git?branch=scroll-dev-0902#b0ffab97316f9cf4b9e65aba398a047a8d6424a1"
source = "git+https://github.com/scroll-tech/mpt-circuit.git?branch=scroll-dev-0920-fix#d6bd0f291d41c4585e783d2d94a77fd80e1ba47e"
dependencies = [
"bitvec 0.22.3",
"ff 0.11.1",
@@ -1451,7 +1451,7 @@ dependencies = [
[[package]]
name = "halo2_proofs"
version = "0.2.0"
source = "git+https://github.com/scroll-tech/halo2.git?branch=scroll-dev-0902#6f18f38e82d302cd8b6ce8809b59c32350b019a3"
source = "git+https://github.com/scroll-tech/halo2.git?branch=scroll-dev-0902#6b8c8a07da9fbf5d5878831e35360b8c5e6d89a3"
dependencies = [
"blake2b_simd",
"cfg-if 0.1.10",
@@ -2397,7 +2397,7 @@ dependencies = [
[[package]]
name = "poseidon"
version = "0.2.0"
source = "git+https://github.com/appliedzkp/poseidon.git#5d29df01a95e3df6334080d28e983407f56b5da3"
source = "git+https://github.com/appliedzkp/poseidon.git#0b9965fbcd9e03559088b8f68489592286bc55e0"
dependencies = [
"group",
"halo2curves 0.2.1 (git+https://github.com/privacy-scaling-explorations/halo2curves?tag=0.3.0)",
@@ -2407,7 +2407,7 @@ dependencies = [
[[package]]
name = "poseidon"
version = "0.2.0"
source = "git+https://github.com/privacy-scaling-explorations/poseidon.git#5d29df01a95e3df6334080d28e983407f56b5da3"
source = "git+https://github.com/privacy-scaling-explorations/poseidon.git#0b9965fbcd9e03559088b8f68489592286bc55e0"
dependencies = [
"group",
"halo2curves 0.2.1 (git+https://github.com/privacy-scaling-explorations/halo2curves?tag=0.3.0)",
@@ -3389,7 +3389,7 @@ checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987"
[[package]]
name = "types"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/common-rs#4a299c70835179be7fcf007ebb122b428d063c56"
source = "git+https://github.com/scroll-tech/scroll-zkevm?branch=fix/mpt_limit#cfe8b4e959d6e09b3a45b58e45589ae62988a729"
dependencies = [
"base64 0.13.0",
"blake2",
@@ -3816,7 +3816,7 @@ checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f"
[[package]]
name = "zkevm"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/common-rs#4a299c70835179be7fcf007ebb122b428d063c56"
source = "git+https://github.com/scroll-tech/scroll-zkevm?branch=fix/mpt_limit#cfe8b4e959d6e09b3a45b58e45589ae62988a729"
dependencies = [
"anyhow",
"blake2",

View File

@@ -8,8 +8,8 @@ edition = "2021"
crate-type = ["staticlib"]
[dependencies]
zkevm = { git = "https://github.com/scroll-tech/common-rs" }
types = { git = "https://github.com/scroll-tech/common-rs" }
zkevm = { git = "https://github.com/scroll-tech/scroll-zkevm", branch="fix/mpt_limit" }
types = { git = "https://github.com/scroll-tech/scroll-zkevm", branch="fix/mpt_limit" }
log = "0.4"
env_logger = "0.9.0"

View File

@@ -4,12 +4,12 @@ import (
"crypto/ecdsa"
"crypto/rand"
"encoding/hex"
"encoding/json"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/rlp"
)
// RespStatus represents status code from roller to scroll
@@ -36,12 +36,12 @@ type AuthMsg struct {
type Identity struct {
// Roller name
Name string `json:"name"`
// Time of message creation
Timestamp int64 `json:"timestamp"`
// Unverified Unix timestamp of message creation
Timestamp uint32 `json:"timestamp"`
// Roller public key
PublicKey string `json:"publicKey"`
// Version is common.Version+ZK_VERSION. Use the following to check the latest ZK_VERSION version.
// curl -sL https://api.github.com/repos/scroll-tech/common-rs/commits | jq -r ".[0].sha"
// Version is common.Version+ZkVersion. Use the following to check the latest ZkVersion:
// curl -sL https://api.github.com/repos/scroll-tech/scroll-zkevm/commits | jq -r ".[0].sha"
Version string `json:"version"`
// Random unique token generated by manager
Token string `json:"token"`
@@ -115,12 +115,11 @@ func (a *AuthMsg) PublicKey() (string, error) {
// Hash returns the hash of the auth message, which should be the message used
// to construct the Signature.
func (i *Identity) Hash() ([]byte, error) {
bs, err := json.Marshal(i)
byt, err := rlp.EncodeToBytes(i)
if err != nil {
return nil, err
}
hash := crypto.Keccak256Hash(bs)
hash := crypto.Keccak256Hash(byt)
return hash[:], nil
}
@@ -204,12 +203,12 @@ type ProofDetail struct {
// Hash return proofMsg content hash.
func (z *ProofDetail) Hash() ([]byte, error) {
bs, err := json.Marshal(z)
byt, err := rlp.EncodeToBytes(z)
if err != nil {
return nil, err
}
hash := crypto.Keccak256Hash(bs)
hash := crypto.Keccak256Hash(byt)
return hash[:], nil
}

View File

@@ -4,6 +4,7 @@ import (
"testing"
"time"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/stretchr/testify/assert"
)
@@ -15,7 +16,7 @@ func TestAuthMessageSignAndVerify(t *testing.T) {
authMsg := &AuthMsg{
Identity: &Identity{
Name: "testRoller",
Timestamp: time.Now().UnixNano(),
Timestamp: uint32(time.Now().Unix()),
},
}
assert.NoError(t, authMsg.Sign(privkey))
@@ -23,4 +24,10 @@ func TestAuthMessageSignAndVerify(t *testing.T) {
ok, err := authMsg.Verify()
assert.NoError(t, err)
assert.Equal(t, true, ok)
// Check public key is ok.
pub, err := authMsg.PublicKey()
assert.NoError(t, err)
pubkey := crypto.CompressPubkey(&privkey.PublicKey)
assert.Equal(t, pub, common.Bytes2Hex(pubkey))
}

View File

@@ -1,12 +0,0 @@
package utils
import "os"
// GetEnvWithDefault get value from env if is none use the default
func GetEnvWithDefault(key string, defult string) string {
val := os.Getenv(key)
if len(val) == 0 {
val = defult
}
return val
}

View File

@@ -1,6 +1,8 @@
package utils
import "github.com/urfave/cli/v2"
import (
"github.com/urfave/cli/v2"
)
var (
// CommonFlags is used for app common flags in different modules

77
common/utils/rpc_test.go Normal file
View File

@@ -0,0 +1,77 @@
package utils
import (
"context"
"testing"
"github.com/scroll-tech/go-ethereum/rpc"
"github.com/stretchr/testify/assert"
)
type testService struct{}
type echoArgs struct {
S string
}
type echoResult struct {
Name string
ID int
Args *echoArgs
}
func (s *testService) NoArgsRets() {}
func (s *testService) Echo(str string, i int, args *echoArgs) echoResult {
return echoResult{str, i, args}
}
func TestStartHTTPEndpoint(t *testing.T) {
endpoint := "localhost:18080"
handler, _, err := StartHTTPEndpoint(endpoint, []rpc.API{
{
Public: true,
Namespace: "test",
Service: new(testService),
},
})
assert.NoError(t, err)
defer handler.Shutdown(context.Background())
client, err := rpc.Dial("http://" + endpoint)
assert.NoError(t, err)
assert.NoError(t, client.Call(nil, "test_noArgsRets"))
result := echoResult{}
assert.NoError(t, client.Call(&result, "test_echo", "test", 0, &echoArgs{S: "test"}))
assert.Equal(t, 0, result.ID)
assert.Equal(t, "test", result.Name)
defer client.Close()
}
func TestStartWSEndpoint(t *testing.T) {
endpoint := "localhost:18081"
handler, _, err := StartWSEndpoint(endpoint, []rpc.API{
{
Public: true,
Namespace: "test",
Service: new(testService),
},
})
assert.NoError(t, err)
defer handler.Shutdown(context.Background())
client, err := rpc.Dial("ws://" + endpoint)
assert.NoError(t, err)
assert.NoError(t, client.Call(nil, "test_noArgsRets"))
result := echoResult{}
assert.NoError(t, client.Call(&result, "test_echo", "test", 0, &echoArgs{S: "test"}))
assert.Equal(t, 0, result.ID)
assert.Equal(t, "test", result.Name)
defer client.Close()
}

View File

@@ -0,0 +1,22 @@
package utils
import (
"fmt"
"os"
"github.com/docker/docker/pkg/reexec"
"github.com/urfave/cli/v2"
)
// RegisterSimulation registers an initializer function for integration tests.
func RegisterSimulation(app *cli.App, name string) {
// Run the app for integration-test
reexec.Register(name, func() {
if err := app.Run(os.Args); err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
os.Exit(0)
})
reexec.Init()
}
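A hedged sketch of how this is intended to be wired up with the common/cmd helpers (the app name and the surrounding setup are assumptions for illustration, not taken from this changeset):
// In the module under test, typically from an init or TestMain:
app := cli.NewApp()                               // the module's *cli.App
utils.RegisterSimulation(app, "coordinator-test") // name reexec.Self() will re-exec as

// In an integration test, spawn the registered app through common/cmd:
c := cmd.NewCmd(t, "coordinator-test")
c.RunApp(nil) // nil waitResult: run synchronously until the app exits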

View File

@@ -1,30 +1,10 @@
package utils
import (
"sync/atomic"
"github.com/scroll-tech/go-ethereum/core/types"
"golang.org/x/sync/errgroup"
)
// ComputeTraceGasCost returns the gas cost of the block, taken from the header's GasUsed field.
func ComputeTraceGasCost(trace *types.BlockTrace) uint64 {
var (
gasCost uint64
eg errgroup.Group
)
for idx := range trace.ExecutionResults {
i := idx
eg.Go(func() error {
var sum uint64
for _, log := range trace.ExecutionResults[i].StructLogs {
sum += log.GasCost
}
atomic.AddUint64(&gasCost, sum)
return nil
})
}
_ = eg.Wait()
return gasCost
return trace.Header.GasUsed
}

View File

@@ -19,13 +19,7 @@ func TestComputeTraceCost(t *testing.T) {
blockTrace := &types.BlockTrace{}
err = json.Unmarshal(templateBlockTrace, blockTrace)
assert.NoError(t, err)
var sum uint64
for _, v := range blockTrace.ExecutionResults {
for _, sv := range v.StructLogs {
sum += sv.GasCost
}
}
res := utils.ComputeTraceGasCost(blockTrace)
assert.Equal(t, sum, res)
var expected = blockTrace.Header.GasUsed
got := utils.ComputeTraceGasCost(blockTrace)
assert.Equal(t, expected, got)
}

13
common/utils/utils.go Normal file
View File

@@ -0,0 +1,13 @@
package utils
import "time"
// TryTimes runs the function up to the given number of times, stopping early once it returns true.
func TryTimes(times int, run func() bool) {
for i := 0; i < times; i++ {
if run() {
return
}
time.Sleep(time.Millisecond * 500)
}
}
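Beyond the docker helpers above, the same pattern fits any readiness probe; with the fixed 500ms sleep, 10 attempts bound the wait to roughly five seconds. A hypothetical example (the endpoint, health path and net/http usage are assumptions, not from this changeset):
// Illustrative only: poll an HTTP endpoint until it reports healthy.
utils.TryTimes(10, func() bool {
	resp, err := http.Get(endpoint + "/health")
	if err != nil {
		return false
	}
	defer resp.Body.Close()
	return resp.StatusCode == http.StatusOK
})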

View File

@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "prealpha-v8.2"
var tag = "prealpha-v11.14"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {
@@ -22,8 +22,8 @@ var commit = func() string {
return ""
}()
// ZK_VERSION is commit-id of common/libzkp/impl/cargo.lock/common-rs
var ZK_VERSION string
// ZkVersion is the commit ID of the scroll-zkevm dependency pinned in common/libzkp/impl/Cargo.lock
var ZkVersion string
// Version denote the version of scroll protocol, including the l2geth, relayer, coordinator, roller, contracts and etc.
var Version = fmt.Sprintf("%s-%s-%s", tag, commit, ZK_VERSION)
var Version = fmt.Sprintf("%s-%s-%s", tag, commit, ZkVersion)

View File

@@ -232,6 +232,50 @@ function initialize(uint256 _chainId) external nonpayable
|---|---|---|
| _chainId | uint256 | undefined |
### isBlockFinalized
```solidity
function isBlockFinalized(bytes32 _blockHash) external view returns (bool)
```
Return whether the block is finalized by block hash.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _blockHash | bytes32 | undefined |
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | bool | undefined |
### isBlockFinalized
```solidity
function isBlockFinalized(uint256 _blockHeight) external view returns (bool)
```
Return whether the block is finalized by block height.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _blockHeight | uint256 | undefined |
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | bool | undefined |
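Because the two overloads share a name, a Go caller has to pick one by its full signature. A hedged sketch using a raw eth_call (the helper below is hypothetical and not part of this changeset; it assumes an ethclient.Client, the rollup's address, and the go-ethereum common/crypto packages are available):
```go
// isBlockFinalizedByHeight is a hypothetical helper showing how the uint256
// overload can be selected explicitly when calling the contract from Go.
func isBlockFinalizedByHeight(ctx context.Context, client *ethclient.Client, rollup common.Address, height *big.Int) (bool, error) {
	selector := crypto.Keccak256([]byte("isBlockFinalized(uint256)"))[:4] // full signature picks the overload
	data := append(selector, common.BigToHash(height).Bytes()...)        // uint256 argument as one 32-byte word
	ret, err := client.CallContract(ctx, ethereum.CallMsg{To: &rollup, Data: data}, nil)
	if err != nil || len(ret) < 32 {
		return false, err
	}
	return ret[31] != 0, nil // ABI-encoded bool: the value sits in the last byte
}
```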
### lastFinalizedBatchID
```solidity

View File

@@ -97,7 +97,7 @@ contract L1ScrollMessenger is OwnableUpgradeable, PausableUpgradeable, ScrollMes
require(!isMessageExecuted[_msghash], "Message successfully executed");
// @todo check proof
require(IZKRollup(rollup).verifyMessageStateProof(_proof.batchIndex, _proof.blockHeight), "invalid state proof");
require(IZKRollup(rollup).isBlockFinalized(_proof.blockHeight), "invalid state proof");
require(ZkTrieVerifier.verifyMerkleProof(_proof.merkleProof), "invalid proof");
// @todo check `_to` address to avoid attack.

View File

@@ -59,6 +59,14 @@ interface IZKRollup {
/**************************************** View Functions ****************************************/
/// @notice Return whether the block is finalized by block hash.
/// @param blockHash The hash of the block to query.
function isBlockFinalized(bytes32 blockHash) external view returns (bool);
/// @notice Return whether the block is finalized by block height.
/// @param blockHeight The height of the block to query.
function isBlockFinalized(uint256 blockHeight) external view returns (bool);
/// @notice Return the message hash by index.
/// @param _index The index to query.
function getMessageHashByIndex(uint256 _index) external view returns (bytes32);

View File

@@ -89,6 +89,24 @@ contract ZKRollup is OwnableUpgradeable, IZKRollup {
/**************************************** View Functions ****************************************/
/// @inheritdoc IZKRollup
function isBlockFinalized(bytes32 _blockHash) external view returns (bool) {
// block not committed
if (blocks[_blockHash].transactionRoot == bytes32(0)) return false;
uint256 _batchIndex = blocks[_blockHash].batchIndex;
bytes32 _batchId = finalizedBatches[_batchIndex];
return _batchId != bytes32(0);
}
/// @inheritdoc IZKRollup
function isBlockFinalized(uint256 _blockHeight) external view returns (bool) {
bytes32 _batchID = lastFinalizedBatchID;
bytes32 _batchHash = batches[_batchID].batchHash;
uint256 _maxHeight = blocks[_batchHash].blockHeight;
return _blockHeight <= _maxHeight;
}
/// @inheritdoc IZKRollup
function getMessageHashByIndex(uint256 _index) external view returns (bytes32) {
return messageQueue[_index];

View File

@@ -386,7 +386,7 @@ library RollupVerifier {
t1
)
);
update_hash_scalar(7326291674247555594112707886804937707847188185923070866278273345303869756280, absorbing, 0);
update_hash_scalar(18620528901694425296072105892920066495478887717015933899493919566746585676047, absorbing, 0);
update_hash_point(m[0], m[1], absorbing, 2);
for (t0 = 0; t0 <= 4; t0++) {
update_hash_point(proof[0 + t0 * 2], proof[1 + t0 * 2], absorbing, 5 + t0 * 3);
@@ -724,8 +724,8 @@ library RollupVerifier {
(t0, t1) = (ecc_mul_add_pm(m, proof, 1461486238301980199876269201563775120819706402602, t0, t1));
(t0, t1) = (
ecc_mul_add(
18701609130775737229348071043080155034023979562517390395403433088802478899758,
15966955543930185772599298905781740007968379271659670990460125132276790404701,
5335172776193682293002595672140655300498265857728161236987288793112411362256,
9153855726472286104461915396077745889260526805920405949557461469033032628222,
m[78],
t0,
t1
@@ -733,8 +733,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
10391672869328159104536012527288890078475214572275421477472198141744100604180,
16383182967525077486800851500412772270268328143041811261940514978333847876450,
9026202793013131831701482540600751978141377016764300618037152689098701087208,
19644677619301694001087044142922327551116787792977369058786364247421954485859,
m[77],
t0,
t1
@@ -742,8 +742,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
1694121668121560366967381814358868176695875056710903754887787227675156636991,
6288755472313871386012926867179622380057563139110460659328016508371672965822,
10826234859452509771814283128042282248838241144105945602706734900173561719624,
5628243352113405764051108388315822074832358861640908064883601198703833923438,
m[76],
t0,
t1
@@ -751,8 +751,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
8449090587209846475328734419746789925412190193479844231777165308243174237722,
19620423218491500875965944829407986067794157844846402182805878618955604592848,
9833916648960859819503777242562918959056952519298917148524233826817297321072,
837915750759756490172805968793319594111899487492554675680829218939384285955,
m[75],
t0,
t1
@@ -760,8 +760,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
5053208336959682582031156680199539869251745263409434673229644546747696847142,
2515271708296970065769200367712058290268116287798438948140802173656220671206,
10257805671474982710489846158410183388099935223468876792311814484878286190506,
6925081619093494730602614238209964215162532591387952125009817011864359314464,
m[74],
t0,
t1
@@ -769,8 +769,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
14044565934581841113280816557133159251170886931106151374890478449607604267942,
4516676687937794780030405510740994119381246893674971835541700695978704585552,
4475887208248126488081900175351981014160135345959097965081514547035591501401,
17809934801579097157548855239127693133451078551727048660674021788322026074440,
m[73],
t0,
t1
@@ -823,8 +823,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
4919836553908828082540426444868776555669883964231731088484431671272015675682,
2534996469663628472218664436969797350677809756735321673130157881813913441609,
18539453526841971932568089122596968064597086391356856358866942118522457107863,
3647865108410881496134024808028560930237661296032155096209994441023206530212,
m[67],
t0,
t1
@@ -841,8 +841,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
7298741378311576950839968993357330108079245118485170808123459961337830256312,
10327561179499117619949936626306234488421661318541529469701192193684736307992,
19267653195273486172950176174654469275684545789180737280515385961619717720594,
8975180971271331994178632284567744253406636398050840906044549681238954521839,
m[65],
t0,
t1
@@ -859,8 +859,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
21344975294019301064497004820288763682448968861642019035490416932201272957274,
10527619823264344893410550194287064640208153251186939130321425213582959780489,
14883199025754033315476980677331963148201112555947054150371482532558947065890,
19913319410736467436640597337700981504577668548125107926660028143291852201132,
m[63],
t0,
t1
@@ -868,8 +868,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
8972742415650205333409282370033440562593431348747288268814492203356823531160,
8116706321112691122771049432546166822575953322170688547310064134261753771143,
18290509522533712126835038141804610778392690327965261406132668236833728306838,
3710298066677974093924183129147170087104393961634393354172472701713090868425,
m[62],
t0,
t1
@@ -877,8 +877,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
2245383788954722547301665173770198299224442299145553661157120655982065376923,
21429627532145565836455474503387893562363999035988060101286707048187310790834,
19363467492491917195052183458025198134822377737629876295496853723068679518308,
5854330679906778271391785925618350923591828884998994352880284635518306250788,
m[61],
t0,
t1
@@ -886,8 +886,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
6631831869726773976361406817204839637256208337970281843457872807848960103655,
9564029493986604546558813596663080644256762699468834511701525072767927949801,
16181109780595136982201896766265118193466959602989950846464420951358063185297,
8811570609098296287610981932552574275858846837699446990256241563674576678567,
m[60],
t0,
t1
@@ -895,8 +895,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
11480433023546787855799302686493624232665854025790899812568432142639901048711,
19408335616099148180409133533838326787843523379558500985213116784449716389602,
10062549363619405779400496848029040530770586215674909583260224093983878118724,
1989582851705118987083736605676322092152792129388805756040224163519806904905,
m[59],
t0,
t1
@@ -904,8 +904,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
17119009547436104907589161251911916154539209413889810725547125453954285498068,
16196009614025712805558792610177918739658373559330006740051047693948800191562,
19016883348721334939078393300672242978266248861945205052660538073783955572863,
1976040209279107904310062622264754919366006151976304093568644070161390236037,
m[58],
t0,
t1
@@ -922,8 +922,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
18650010323993268535055713787599480879302828622769515272251129462854128226895,
11244246887388549559894193327128701737108444364011850111062992666532968469107,
10610438624239085062445835373411523076517149007370367578847561825933262473434,
14538778619212692682166219259545768162136692909816914624880992580957990166795,
m[56],
t0,
t1

View File

@@ -5,9 +5,9 @@ IMAGE_VERSION=latest
REPO_ROOT_DIR=./..
ifeq (4.3,$(firstword $(sort $(MAKE_VERSION) 4.3)))
ZK_VERSION=$(shell grep -m 1 "common-rs" ../common/libzkp/impl/Cargo.lock | cut -d "#" -f2 | cut -c-7)
ZK_VERSION=$(shell grep -m 1 "scroll-zkevm" ../common/libzkp/impl/Cargo.lock | cut -d "#" -f2 | cut -c-7)
else
ZK_VERSION=$(shell grep -m 1 "common-rs" ../common/libzkp/impl/Cargo.lock | cut -d "\#" -f2 | cut -c-7)
ZK_VERSION=$(shell grep -m 1 "scroll-zkevm" ../common/libzkp/impl/Cargo.lock | cut -d "\#" -f2 | cut -c-7)
endif
test:
@@ -15,17 +15,15 @@ test:
libzkp:
cd ../common/libzkp/impl && cargo build --release && cp ./target/release/libzkp.a ../interface/
cp -r ../common/libzkp/interface ./verifier/lib
rm -rf ./verifier/lib && cp -r ../common/libzkp/interface ./verifier/lib
coordinator: ## Builds the Coordinator instance.
cd ../common/libzkp/impl && cargo build --release && cp ./target/release/libzkp.a ../interface/
cp -r ../common/libzkp/interface ./verifier/lib
go build -ldflags "-X scroll-tech/common/version.ZK_VERSION=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator ./cmd
coordinator: libzkp ## Builds the Coordinator instance.
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator ./cmd
test-verifier:
test-verifier: libzkp
go test -tags ffi -timeout 0 -v ./verifier
test-gpu-verifier:
test-gpu-verifier: libzkp
go test -tags="gpu ffi" -timeout 0 -v ./verifier
lint: ## Lint the files - used for CI
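
The `-ldflags "-X ..."` fix above only works if `ZkVersion` names an exported package-level string variable; a minimal sketch of what such a variable in `scroll-tech/common/version` presumably looks like (the default value here is illustrative, not taken from the repo):

```go
// Minimal sketch (assumed layout of scroll-tech/common/version): a plain string
// variable that the linker overwrites via -ldflags "-X <import path>.ZkVersion=<commit>".
package version

// ZkVersion holds the scroll-zkevm commit hash; "unknown" is a placeholder default
// that the Makefile's -X flag replaces at link time.
var ZkVersion = "unknown"
```

With this in place, `go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}"` embeds the scroll-zkevm commit extracted from Cargo.lock into the coordinator binary.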

View File

@@ -9,17 +9,6 @@ make clean
make coordinator
```
## db config
* db settings in config
```bash
# DB_DSN: db data source name
export DB_DSN="postgres://admin:123456@localhost/test_db?sslmode=disable"
# DB_DRIVER: db driver name
export DB_DRIVER="postgres"
```
## Start
* use default ports and config.json

View File

@@ -74,7 +74,7 @@ func (m *Manager) Register(ctx context.Context, authMsg *message.AuthMsg) (*rpc.
go func() {
defer func() {
m.freeRoller(pubkey)
log.Info("roller unregister", "name", authMsg.Identity.Name)
log.Info("roller unregister", "name", authMsg.Identity.Name, "pubkey", pubkey)
}()
for {
@@ -82,14 +82,14 @@ func (m *Manager) Register(ctx context.Context, authMsg *message.AuthMsg) (*rpc.
case task := <-taskCh:
notifier.Notify(rpcSub.ID, task) //nolint
case err := <-rpcSub.Err():
log.Warn("client stopped the ws connection", "err", err)
log.Warn("client stopped the ws connection", "name", authMsg.Identity.Name, "pubkey", pubkey, "err", err)
return
case <-notifier.Closed():
return
}
}
}()
log.Info("roller register", "name", authMsg.Identity.Name, "version", authMsg.Identity.Version)
log.Info("roller register", "name", authMsg.Identity.Name, "pubkey", pubkey, "version", authMsg.Identity.Version)
return rpcSub, nil
}
@@ -116,6 +116,5 @@ func (m *Manager) SubmitProof(proof *message.ProofMsg) (bool, error) {
}
defer m.freeTaskIDForRoller(pubkey, proof.ID)
log.Info("Received zk proof", "proof id", proof.ID, "result", true)
return true, nil
}

File diff suppressed because one or more lines are too long

Binary file not shown.

View File

@@ -1,27 +0,0 @@
package client
import (
"context"
"github.com/scroll-tech/go-ethereum"
"scroll-tech/common/message"
)
// RequestToken generates token for roller
func (c *Client) RequestToken(ctx context.Context, authMsg *message.AuthMsg) (string, error) {
var token string
err := c.client.CallContext(ctx, &token, "roller_requestToken", authMsg)
return token, err
}
// RegisterAndSubscribe subscribe roller and register, verified by sign data.
func (c *Client) RegisterAndSubscribe(ctx context.Context, taskCh chan *message.TaskMsg, authMsg *message.AuthMsg) (ethereum.Subscription, error) {
return c.client.Subscribe(ctx, "roller", taskCh, "register", authMsg)
}
// SubmitProof get proof from roller.
func (c *Client) SubmitProof(ctx context.Context, proof *message.ProofMsg) (bool, error) {
var ok bool
return ok, c.client.CallContext(ctx, &ok, "roller_submitProof", proof)
}

View File

@@ -3,7 +3,10 @@ package client
import (
"context"
"github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/rpc"
"scroll-tech/common/message"
)
// Client defines typed wrappers for the Ethereum RPC API.
@@ -29,3 +32,21 @@ func DialContext(ctx context.Context, rawurl string) (*Client, error) {
func NewClient(c *rpc.Client) *Client {
return &Client{client: c}
}
// RequestToken generates token for roller
func (c *Client) RequestToken(ctx context.Context, authMsg *message.AuthMsg) (string, error) {
var token string
err := c.client.CallContext(ctx, &token, "roller_requestToken", authMsg)
return token, err
}
// RegisterAndSubscribe subscribe roller and register, verified by sign data.
func (c *Client) RegisterAndSubscribe(ctx context.Context, taskCh chan *message.TaskMsg, authMsg *message.AuthMsg) (ethereum.Subscription, error) {
return c.client.Subscribe(ctx, "roller", taskCh, "register", authMsg)
}
// SubmitProof get proof from roller.
func (c *Client) SubmitProof(ctx context.Context, proof *message.ProofMsg) (bool, error) {
var ok bool
return ok, c.client.CallContext(ctx, &ok, "roller_submitProof", proof)
}
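
A self-contained usage sketch (not part of this diff) of the three client methods moved here: a roller authenticating, subscribing for tasks, and submitting proofs. The endpoint URL and key handling are placeholders, and error handling is abbreviated.

```go
package main

import (
	"context"
	"log"
	"time"

	"github.com/scroll-tech/go-ethereum/crypto"

	"scroll-tech/common/message"
	"scroll-tech/coordinator/client"
)

func main() {
	ctx := context.Background()
	c, err := client.DialContext(ctx, "ws://localhost:8391") // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}

	privKey, _ := crypto.GenerateKey()
	authMsg := &message.AuthMsg{Identity: &message.Identity{
		Name:      "roller_example",
		Timestamp: uint32(time.Now().Unix()),
	}}
	_ = authMsg.Sign(privKey)

	// Fetch a login token, then re-sign the auth message with it.
	token, err := c.RequestToken(ctx, authMsg)
	if err != nil {
		log.Fatal(err)
	}
	authMsg.Identity.Token = token
	_ = authMsg.Sign(privKey)

	// Register and subscribe for proving tasks.
	taskCh := make(chan *message.TaskMsg, 4)
	sub, err := c.RegisterAndSubscribe(ctx, taskCh, authMsg)
	if err != nil {
		log.Fatal(err)
	}
	defer sub.Unsubscribe()

	// Submit a (mock) proof for each received task.
	for task := range taskCh {
		proof := &message.ProofMsg{ProofDetail: &message.ProofDetail{
			ID:     task.ID,
			Status: message.StatusOk,
			Proof:  &message.AggProof{},
		}}
		_ = proof.Sign(privKey)
		if ok, err := c.SubmitProof(ctx, proof); err != nil || !ok {
			log.Println("submit proof failed", "err", err)
		}
	}
}
```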

coordinator/cmd/app/app.go (new file, 134 lines)
View File

@@ -0,0 +1,134 @@
package app
import (
"fmt"
"os"
"os/signal"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/database"
"scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/coordinator"
"scroll-tech/coordinator/config"
)
var (
// Set up Coordinator app info.
app *cli.App
)
func init() {
app = cli.NewApp()
app.Action = action
app.Name = "coordinator"
app.Usage = "The Scroll L2 Coordinator"
app.Version = version.Version
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Flags = append(app.Flags, apiFlags...)
app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx)
}
// Register `coordinator-test` app for integration-test.
utils.RegisterSimulation(app, "coordinator-test")
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
// init db connection
var ormFactory database.OrmFactory
if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
log.Crit("failed to init db connection", "err", err)
}
client, err := ethclient.Dial(cfg.L2Config.Endpoint)
if err != nil {
return err
}
// Initialize all coordinator modules.
rollerManager, err := coordinator.New(ctx.Context, cfg.RollerManagerConfig, ormFactory, client)
if err != nil {
return err
}
defer func() {
rollerManager.Stop()
err = ormFactory.Close()
if err != nil {
log.Error("can not close ormFactory", "error", err)
}
}()
// Start all modules.
if err = rollerManager.Start(); err != nil {
log.Crit("couldn't start roller manager", "error", err)
}
apis := rollerManager.APIs()
// Register api and start rpc service.
if ctx.Bool(httpEnabledFlag.Name) {
handler, addr, err := utils.StartHTTPEndpoint(
fmt.Sprintf(
"%s:%d",
ctx.String(httpListenAddrFlag.Name),
ctx.Int(httpPortFlag.Name)),
apis)
if err != nil {
log.Crit("Could not start RPC api", "error", err)
}
defer func() {
_ = handler.Shutdown(ctx.Context)
log.Info("HTTP endpoint closed", "url", fmt.Sprintf("http://%v/", addr))
}()
log.Info("HTTP endpoint opened", "url", fmt.Sprintf("http://%v/", addr))
}
// Register api and start ws service.
if ctx.Bool(wsEnabledFlag.Name) {
handler, addr, err := utils.StartWSEndpoint(
fmt.Sprintf(
"%s:%d",
ctx.String(wsListenAddrFlag.Name),
ctx.Int(wsPortFlag.Name)),
apis)
if err != nil {
log.Crit("Could not start WS api", "error", err)
}
defer func() {
_ = handler.Shutdown(ctx.Context)
log.Info("WS endpoint closed", "url", fmt.Sprintf("ws://%v/", addr))
}()
log.Info("WS endpoint opened", "url", fmt.Sprintf("ws://%v/", addr))
}
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
return nil
}
// Run run coordinator.
func Run() {
// RunApp the coordinator.
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}
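
The new `action` function blocks until an OS interrupt arrives before letting the deferred teardown run; a stripped-down sketch of that wait-for-CTRL-C pattern:

```go
// Stripped-down sketch of the graceful-shutdown pattern used in action():
// start services, register deferred teardown, then block on an OS interrupt.
package main

import (
	"log"
	"os"
	"os/signal"
)

func main() {
	// ... start the roller manager and HTTP/WS endpoints here ...
	defer log.Println("services shut down")

	interrupt := make(chan os.Signal, 1)
	signal.Notify(interrupt, os.Interrupt)
	<-interrupt // wait for CTRL-C
}
```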

View File

@@ -0,0 +1,19 @@
package app
import (
"fmt"
"testing"
"time"
"scroll-tech/common/cmd"
"scroll-tech/common/version"
)
func TestRunCoordinator(t *testing.T) {
coordinator := cmd.NewCmd(t, "coordinator-test", "--version")
defer coordinator.WaitExit()
// wait result
coordinator.ExpectWithTimeout(true, time.Second*3, fmt.Sprintf("coordinator version %s", version.Version))
coordinator.RunApp(nil)
}

View File

@@ -1,13 +1,8 @@
package main
package app
import "github.com/urfave/cli/v2"
var (
verifierMockFlag = cli.BoolFlag{
Name: "verifier.mock",
Usage: "Mock the verifier",
Value: false,
}
apiFlags = []cli.Flag{
// http flags
&httpEnabledFlag,

View File

@@ -1,132 +1,7 @@
package main
import (
"fmt"
"os"
"os/signal"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
"scroll-tech/common/utils"
"scroll-tech/common/version"
"scroll-tech/database"
"scroll-tech/coordinator"
"scroll-tech/coordinator/config"
)
import "scroll-tech/coordinator/cmd/app"
func main() {
// Set up Coordinator app info.
app := cli.NewApp()
app.Action = action
app.Name = "Coordinator"
app.Usage = "The Scroll L2 Coordinator"
app.Version = version.Version
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Flags = append(app.Flags, apiFlags...)
app.Flags = append(app.Flags, &verifierMockFlag)
app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx)
}
// Run the coordinator.
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}
func applyConfig(ctx *cli.Context, cfg *config.Config) {
if ctx.IsSet(verifierMockFlag.Name) {
cfg.RollerManagerConfig.Verifier = &config.VerifierConfig{MockMode: ctx.Bool(verifierMockFlag.Name)}
}
}
func action(ctx *cli.Context) error {
// Load config file.
cfgFile := ctx.String(utils.ConfigFileFlag.Name)
cfg, err := config.NewConfig(cfgFile)
if err != nil {
log.Crit("failed to load config file", "config file", cfgFile, "error", err)
}
applyConfig(ctx, cfg)
// init db connection
var ormFactory database.OrmFactory
if ormFactory, err = database.NewOrmFactory(cfg.DBConfig); err != nil {
log.Crit("failed to init db connection", "err", err)
}
// init l2geth connection
client, err := ethclient.Dial(cfg.L2Config.Endpoint)
if err != nil {
log.Crit("failed to init l2geth connection", "err", err)
}
// Initialize all coordinator modules.
rollerManager, err := coordinator.New(ctx.Context, cfg.RollerManagerConfig, ormFactory, client)
if err != nil {
return err
}
defer func() {
rollerManager.Stop()
err = ormFactory.Close()
if err != nil {
log.Error("can not close ormFactory", err)
}
}()
// Start all modules.
if err = rollerManager.Start(); err != nil {
log.Crit("couldn't start coordinator", "error", err)
}
apis := rollerManager.APIs()
// Register api and start rpc service.
if ctx.Bool(httpEnabledFlag.Name) {
handler, addr, err := utils.StartHTTPEndpoint(
fmt.Sprintf(
"%s:%d",
ctx.String(httpListenAddrFlag.Name),
ctx.Int(httpPortFlag.Name)),
apis)
if err != nil {
log.Crit("Could not start HTTP api", "error", err)
}
defer func() {
_ = handler.Shutdown(ctx.Context)
log.Info("HTTP endpoint closed", "url", fmt.Sprintf("http://%v/", addr))
}()
log.Info("HTTP endpoint opened", "url", fmt.Sprintf("http://%v/", addr))
}
if ctx.Bool(wsEnabledFlag.Name) {
handler, addr, err := utils.StartWSEndpoint(
fmt.Sprintf(
"%s:%d",
ctx.String(wsListenAddrFlag.Name),
ctx.Int(wsPortFlag.Name)),
apis)
if err != nil {
log.Crit("Could not start WS api", "error", err)
}
defer func() {
_ = handler.Shutdown(ctx.Context)
log.Info("WS endpoint closed", "url", fmt.Sprintf("ws://%v/", addr))
}()
log.Info("WS endpoint opened", "url", fmt.Sprintf("ws://%v/", addr))
}
// Catch CTRL-C to ensure a graceful shutdown.
interrupt := make(chan os.Signal, 1)
signal.Notify(interrupt, os.Interrupt)
// Wait until the interrupt signal is received from an OS signal.
<-interrupt
return nil
app.Run()
}

View File

@@ -7,8 +7,6 @@ import (
"path/filepath"
"strings"
"scroll-tech/common/utils"
db_config "scroll-tech/database"
)
@@ -59,10 +57,6 @@ func NewConfig(file string) (*Config, error) {
return nil, err
}
// cover value by env fields
cfg.DBConfig.DSN = utils.GetEnvWithDefault("DB_DSN", cfg.DBConfig.DSN)
cfg.DBConfig.DriverName = utils.GetEnvWithDefault("DB_DRIVER", cfg.DBConfig.DriverName)
// Check roller's order session
order := strings.ToUpper(cfg.RollerManagerConfig.OrderSession)
if len(order) > 0 && !(order == "ASC" || order == "DESC") {
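
The removed lines relied on an env-override helper whose body is not shown in this diff; a plausible sketch of what `utils.GetEnvWithDefault` does (hypothetical implementation, assumed behavior):

```go
// Hypothetical sketch of the helper the removed override lines used: return the
// environment value when set, otherwise the default taken from the config file.
package utils

import "os"

// GetEnvWithDefault reads key from the environment, falling back to def.
func GetEnvWithDefault(key, def string) string {
	if v, ok := os.LookupEnv(key); ok && v != "" {
		return v
	}
	return def
}
```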

View File

@@ -5,7 +5,7 @@ go 1.18
require (
github.com/orcaman/concurrent-map v1.0.0
github.com/patrickmn/go-cache v2.1.0+incompatible
github.com/scroll-tech/go-ethereum v1.10.14-0.20221213034543-78c1f57fcfea
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81
github.com/stretchr/testify v1.8.0
github.com/urfave/cli/v2 v2.10.2
golang.org/x/sync v0.1.0
@@ -35,8 +35,8 @@ require (
github.com/tklauser/numcpus v0.4.0 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/crypto v0.4.0 // indirect
golang.org/x/sys v0.3.0 // indirect
golang.org/x/crypto v0.5.0 // indirect
golang.org/x/sys v0.4.0 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

View File

@@ -349,8 +349,8 @@ github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221213034543-78c1f57fcfea h1:KYlmCH4cDMGxQzaYoSK8+DF53POGpAmnzusAtBWzEjA=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221213034543-78c1f57fcfea/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81 h1:Gm18RZ9WTR2Dupumr60E2m1Noe+l9/lITt6iRyxxZoc=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/zktrie v0.3.0/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
github.com/scroll-tech/zktrie v0.3.1 h1:HlR+fMBdjXX1/7cUMqpUgGEhGy/3vN1JpwQ0ovg/Ys8=
github.com/scroll-tech/zktrie v0.3.1/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
@@ -424,8 +424,8 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8=
golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80=
golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -540,8 +540,8 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -553,7 +553,7 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM=
golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=

View File

@@ -65,7 +65,6 @@ type Manager struct {
// A map containing proof failed or verify failed proof.
rollerPool cmap.ConcurrentMap
// TODO: once put into use, should add to graceful restart.
failedSessionInfos map[string]*SessionInfo
// A direct connection to the Halo2 verifier, used to verify
@@ -195,8 +194,18 @@ func (m *Manager) restorePrevSessions() {
finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
}
m.sessions[sess.info.ID] = sess
log.Info("Coordinator restart reload sessions", "ID", sess.info.ID, "sess", sess.info)
go m.CollectProofs(sess.info.ID, sess)
log.Info("Coordinator restart reload sessions", "session start time", time.Unix(sess.info.StartTimestamp, 0))
for _, roller := range sess.info.Rollers {
log.Info(
"restore roller info for session",
"session id", sess.info.ID,
"roller name", roller.Name,
"public key", roller.PublicKey,
"proof status", roller.Status)
}
go m.CollectProofs(sess)
}
}
}
@@ -220,17 +229,29 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
proofTimeSec := uint64(time.Since(time.Unix(sess.info.StartTimestamp, 0)).Seconds())
// Ensure this roller is eligible to participate in the session.
if roller, ok := sess.info.Rollers[pk]; !ok {
return fmt.Errorf("roller %s is not eligible to partake in proof session %v", pk, msg.ID)
} else if roller.Status == orm.RollerProofValid {
roller, ok := sess.info.Rollers[pk]
if !ok {
return fmt.Errorf("roller %s (%s) is not eligible to partake in proof session %v", roller.Name, roller.PublicKey, msg.ID)
}
if roller.Status == orm.RollerProofValid {
// In order to prevent DoS attacks, it is forbidden to repeatedly submit valid proofs.
// TODO: Defend invalid proof resubmissions by one of the following two methods:
// (i) slash the roller for each submission of invalid proof
// (ii) set the maximum failure retry times
log.Warn("roller has already submitted valid proof in proof session", "roller", pk, "proof id", msg.ID)
log.Warn(
"roller has already submitted valid proof in proof session",
"roller name", roller.Name,
"roller pk", roller.PublicKey,
"proof id", msg.ID,
)
return nil
}
log.Info("Received zk proof", "proof id", msg.ID)
log.Info(
"handling zk proof",
"proof id", msg.ID,
"roller name", roller.Name,
"roller pk", roller.PublicKey,
)
defer func() {
// TODO: maybe we should use db tx for the whole process?
@@ -250,12 +271,13 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
}()
if msg.Status != message.StatusOk {
log.Error("Roller failed to generate proof", "msg.ID", msg.ID, "error", msg.Error)
if dbErr = m.orm.UpdateProvingStatus(msg.ID, orm.ProvingTaskFailed); dbErr != nil {
log.Error("failed to update task status as failed", "error", dbErr)
}
// record the failed session.
m.addFailedSession(sess, msg.Error)
log.Error(
"Roller failed to generate proof",
"msg.ID", msg.ID,
"roller name", roller.Name,
"roller pk", roller.PublicKey,
"error", msg.Error,
)
return nil
}
@@ -280,8 +302,6 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
success, err = m.verifier.VerifyProof(msg.Proof)
if err != nil {
// record failed session.
m.addFailedSession(sess, err.Error())
// TODO: this is only a temp workaround for testnet, we should return err in real cases
success = false
log.Error("Failed to verify zk proof", "proof id", msg.ID, "error", err)
@@ -290,35 +310,31 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
log.Info("Verify zk proof successfully", "verification result", success, "proof id", msg.ID)
}
var status orm.ProvingStatus
if success {
status = orm.ProvingTaskVerified
} else {
// Set status as skipped if verification fails.
// Note that this is only a workaround for testnet here.
// TODO: In real cases we should reset to orm.ProvingTaskUnassigned
// so as to re-distribute the task in the future
status = orm.ProvingTaskFailed
if dbErr = m.orm.UpdateProvingStatus(msg.ID, orm.ProvingTaskVerified); dbErr != nil {
log.Error(
"failed to update proving_status",
"msg.ID", msg.ID,
"status", orm.ProvingTaskVerified,
"error", dbErr)
}
return dbErr
}
if dbErr = m.orm.UpdateProvingStatus(msg.ID, status); dbErr != nil {
log.Error("failed to update proving_status", "msg.ID", msg.ID, "status", status, "error", dbErr)
}
return dbErr
return nil
}
// CollectProofs collects proofs corresponding to a proof generation session.
func (m *Manager) CollectProofs(id string, sess *session) {
timer := time.NewTimer(time.Duration(m.cfg.CollectionTime) * time.Minute)
func (m *Manager) CollectProofs(sess *session) {
for {
select {
case <-timer.C:
case <-time.After(time.Duration(m.cfg.CollectionTime) * time.Minute):
m.mu.Lock()
// Ensure proper clean-up of resources.
defer func() {
delete(m.sessions, id)
// TODO: remove the clean-up, rollers report healthy status.
for pk := range sess.info.Rollers {
m.freeTaskIDForRoller(pk, sess.info.ID)
}
delete(m.sessions, sess.info.ID)
m.mu.Unlock()
}()
@@ -335,13 +351,13 @@ func (m *Manager) CollectProofs(id string, sess *session) {
// record failed session.
errMsg := "proof generation session ended without receiving any valid proofs"
m.addFailedSession(sess, errMsg)
log.Warn(errMsg, "session id", id)
log.Warn(errMsg, "session id", sess.info.ID)
// Set status as skipped.
// Note that this is only a workaround for testnet here.
// TODO: In real cases we should reset to orm.ProvingTaskUnassigned
// so as to re-distribute the task in the future
if err := m.orm.UpdateProvingStatus(id, orm.ProvingTaskFailed); err != nil {
log.Error("fail to reset task_status as Unassigned", "id", id, "err", err)
if err := m.orm.UpdateProvingStatus(sess.info.ID, orm.ProvingTaskFailed); err != nil {
log.Error("fail to reset task_status as Unassigned", "id", sess.info.ID, "err", err)
}
return
}
@@ -351,17 +367,32 @@ func (m *Manager) CollectProofs(id string, sess *session) {
_ = participatingRollers[randIndex]
// TODO: reward winner
return
case ret := <-sess.finishChan:
m.mu.Lock()
sess.info.Rollers[ret.pk].Status = ret.status
m.mu.Unlock()
if m.isSessionFailed(sess.info) {
if err := m.orm.UpdateProvingStatus(ret.id, orm.ProvingTaskFailed); err != nil {
log.Error("failed to update proving_status as failed", "msg.ID", ret.id, "error", err)
}
}
if err := m.orm.SetSessionInfo(sess.info); err != nil {
log.Error("db set session info fail", "pk", ret.pk, "error", err)
}
m.mu.Unlock()
}
}
}
func (m *Manager) isSessionFailed(info *orm.SessionInfo) bool {
for _, roller := range info.Rollers {
if roller.Status != orm.RollerProofInvalid {
return false
}
}
return true
}
// APIs collect API services.
func (m *Manager) APIs() []rpc.API {
return []rpc.API{
@@ -379,23 +410,22 @@ func (m *Manager) APIs() []rpc.API {
}
// StartProofGenerationSession starts a proof generation session
func (m *Manager) StartProofGenerationSession(task *orm.BlockBatch) bool {
roller := m.selectRoller()
if roller == nil {
func (m *Manager) StartProofGenerationSession(task *orm.BlockBatch) (success bool) {
if m.GetNumberOfIdleRollers() == 0 {
log.Warn("no idle roller when starting proof generation session", "id", task.ID)
return false
}
log.Info("start proof generation session", "id", task.ID)
var dbErr error
defer func() {
if dbErr != nil {
log.Error("StartProofGenerationSession", "dbErr", dbErr)
if !success {
if err := m.orm.UpdateProvingStatus(task.ID, orm.ProvingTaskUnassigned); err != nil {
log.Error("fail to reset task_status as Unassigned", "id", task.ID, "dbErr", dbErr, "err", err)
log.Error("fail to reset task_status as Unassigned", "id", task.ID, "err", err)
}
}
}()
// Get block traces.
blockInfos, err := m.orm.GetBlockInfos(map[string]interface{}{"batch_id": task.ID})
if err != nil {
log.Error(
@@ -405,7 +435,6 @@ func (m *Manager) StartProofGenerationSession(task *orm.BlockBatch) bool {
)
return false
}
traces := make([]*types.BlockTrace, len(blockInfos))
for i, blockInfo := range blockInfos {
traces[i], err = m.Client.GetBlockTraceByHash(m.ctx, common.HexToHash(blockInfo.Hash))
@@ -420,37 +449,62 @@ func (m *Manager) StartProofGenerationSession(task *orm.BlockBatch) bool {
}
}
log.Info("roller is picked", "name", roller.Name, "public_key", roller.PublicKey)
// send trace to roller
roller.sendTask(task.ID, traces)
pk := roller.PublicKey
sessionInfo := &orm.SessionInfo{
ID: task.ID,
Rollers: map[string]*orm.RollerStatus{
pk: {
PublicKey: pk,
Name: roller.Name,
Status: orm.RollerAssigned,
},
},
StartTimestamp: time.Now().Unix(),
// Dispatch task to rollers.
rollers := make(map[string]*orm.RollerStatus)
for i := 0; i < int(m.cfg.RollersPerSession); i++ {
roller := m.selectRoller()
if roller == nil {
log.Info("selectRoller returns nil")
break
}
log.Info("roller is picked", "session id", task.ID, "name", roller.Name, "public key", roller.PublicKey)
// send trace to roller
if !roller.sendTask(task.ID, traces) {
log.Error("send task failed", "roller name", roller.Name, "public key", roller.PublicKey, "id", task.ID)
continue
}
rollers[roller.PublicKey] = &orm.RollerStatus{PublicKey: roller.PublicKey, Name: roller.Name, Status: orm.RollerAssigned}
}
if err := m.orm.SetSessionInfo(sessionInfo); err != nil {
log.Error("db set session info fail", "pk", pk, "error", err)
// No roller assigned.
if len(rollers) == 0 {
log.Error("no roller assigned", "id", task.ID, "number of idle rollers", m.GetNumberOfIdleRollers())
return false
}
// Update session proving status as assigned.
if err = m.orm.UpdateProvingStatus(task.ID, orm.ProvingTaskAssigned); err != nil {
log.Error("failed to update task status", "id", task.ID, "err", err)
return false
}
// Create a proof generation session.
s := &session{
info: sessionInfo,
sess := &session{
info: &orm.SessionInfo{
ID: task.ID,
Rollers: rollers,
StartTimestamp: time.Now().Unix(),
},
finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
}
m.mu.Lock()
m.sessions[task.ID] = s
m.mu.Unlock()
dbErr = m.orm.UpdateProvingStatus(task.ID, orm.ProvingTaskAssigned)
go m.CollectProofs(task.ID, s)
// Store session info.
if err = m.orm.SetSessionInfo(sess.info); err != nil {
log.Error("db set session info fail", "error", err)
for _, roller := range sess.info.Rollers {
log.Error(
"restore roller info for session",
"session id", sess.info.ID,
"roller name", roller.Name,
"public key", roller.PublicKey,
"proof status", roller.Status)
}
return false
}
m.mu.Lock()
m.sessions[task.ID] = sess
m.mu.Unlock()
go m.CollectProofs(sess)
return true
}
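
A simplified sketch of the loop shape `CollectProofs` now has. One nuance of this pattern: `time.After` starts a fresh timer on every loop iteration, so the collection window restarts whenever a status arrives on `finishChan`, whereas the old single `time.NewTimer` measured one fixed window.

```go
// Simplified sketch (not the coordinator's actual code) of a select loop that
// either times out the collection window or keeps processing incoming proof
// statuses. time.After allocates a new timer on each iteration.
package main

import (
	"fmt"
	"time"
)

func collectProofs(finishCh <-chan string, collectionTime time.Duration) {
	for {
		select {
		case <-time.After(collectionTime):
			fmt.Println("collection window elapsed, cleaning up session")
			return
		case pk := <-finishCh:
			fmt.Println("proof status received from", pk)
			// update roller status, persist session info, maybe mark the session failed ...
		}
	}
}

func main() {
	ch := make(chan string, 1)
	ch <- "roller-pubkey"
	collectProofs(ch, 100*time.Millisecond)
}
```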

View File

@@ -3,20 +3,22 @@ package coordinator_test
import (
"context"
"crypto/ecdsa"
"crypto/rand"
"fmt"
"math/big"
"net/http"
"strconv"
"strings"
"sync"
"testing"
"time"
"github.com/scroll-tech/go-ethereum"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/stretchr/testify/assert"
"golang.org/x/sync/errgroup"
"scroll-tech/common/docker"
"scroll-tech/common/message"
"scroll-tech/common/utils"
"scroll-tech/database"
"scroll-tech/database/migrate"
"scroll-tech/database/orm"
@@ -24,23 +26,26 @@ import (
"scroll-tech/coordinator"
client2 "scroll-tech/coordinator/client"
"scroll-tech/common/docker"
"scroll-tech/common/message"
"scroll-tech/common/utils"
bridge_config "scroll-tech/bridge/config"
coordinator_config "scroll-tech/coordinator/config"
)
const managerURL = "localhost:8132"
const newManagerURL = "localhost:8133"
var (
cfg *bridge_config.Config
dbImg docker.ImgInstance
rollerManager *coordinator.Manager
handle *http.Server
cfg *bridge_config.Config
dbImg docker.ImgInstance
)
func setEnv(t *testing.T) error {
var err error
func randomURL() string {
id, _ := rand.Int(rand.Reader, big.NewInt(2000-1))
return fmt.Sprintf("localhost:%d", 10000+2000+id.Int64())
}
func setEnv(t *testing.T) (err error) {
// Load config.
cfg, err = bridge_config.NewConfig("../bridge/config.json")
assert.NoError(t, err)
@@ -48,13 +53,8 @@ func setEnv(t *testing.T) error {
// Create db container.
dbImg = docker.NewTestDBDocker(t, cfg.DBConfig.DriverName)
cfg.DBConfig.DSN = dbImg.Endpoint()
// start roller manager
rollerManager = setupRollerManager(t, cfg.DBConfig)
// start ws service
handle, _, err = utils.StartWSEndpoint(managerURL, rollerManager.APIs())
assert.NoError(t, err)
return err
return
}
func TestApis(t *testing.T) {
@@ -64,14 +64,15 @@ func TestApis(t *testing.T) {
t.Run("TestHandshake", testHandshake)
t.Run("TestFailedHandshake", testFailedHandshake)
t.Run("TestSeveralConnections", testSeveralConnections)
t.Run("TestValidProof", testValidProof)
t.Run("TestInvalidProof", testInvalidProof)
t.Run("TestIdleRollerSelection", testIdleRollerSelection)
t.Run("TestRollerReconnect", testRollerReconnect)
// TODO: Restart roller alone when received task, can add this test case in integration-test.
//t.Run("TestRollerReconnect", testRollerReconnect)
t.Run("TestGracefulRestart", testGracefulRestart)
// Teardown
t.Cleanup(func() {
handle.Shutdown(context.Background())
rollerManager.Stop()
dbImg.Stop()
})
}
@@ -83,9 +84,16 @@ func testHandshake(t *testing.T) {
assert.NoError(t, migrate.ResetDB(l2db.GetDB().DB))
defer l2db.Close()
roller := newMockRoller(t, "roller_test")
// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, cfg.DBConfig, 1, wsURL)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
}()
roller := newMockRoller(t, "roller_test", wsURL)
defer roller.close()
roller.connectToCoordinator(t, managerURL)
assert.Equal(t, 1, rollerManager.GetNumberOfIdleRollers())
}
@@ -97,11 +105,16 @@ func testFailedHandshake(t *testing.T) {
assert.NoError(t, migrate.ResetDB(l2db.GetDB().DB))
defer l2db.Close()
stopCh := make(chan struct{})
// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, cfg.DBConfig, 1, wsURL)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
}()
// prepare
name := "roller_test"
wsURL := "ws://" + managerURL
ctx, cancel := context.WithCancel(context.Background())
defer cancel()
@@ -116,12 +129,11 @@ func testFailedHandshake(t *testing.T) {
authMsg := &message.AuthMsg{
Identity: &message.Identity{
Name: name,
Timestamp: time.Now().UnixNano(),
Timestamp: uint32(time.Now().Unix()),
},
}
assert.NoError(t, authMsg.Sign(privkey))
taskCh := make(chan *message.TaskMsg, 4)
_, err = client.RegisterAndSubscribe(ctx, taskCh, authMsg)
_, err = client.RegisterAndSubscribe(ctx, make(chan *message.TaskMsg, 4), authMsg)
assert.Error(t, err)
// Try to perform handshake with timeouted token
@@ -135,7 +147,7 @@ func testFailedHandshake(t *testing.T) {
authMsg = &message.AuthMsg{
Identity: &message.Identity{
Name: name,
Timestamp: time.Now().UnixNano(),
Timestamp: uint32(time.Now().Unix()),
},
}
assert.NoError(t, authMsg.Sign(privkey))
@@ -145,16 +157,11 @@ func testFailedHandshake(t *testing.T) {
authMsg.Identity.Token = token
assert.NoError(t, authMsg.Sign(privkey))
tick := time.Tick(6 * time.Second)
<-tick
taskCh = make(chan *message.TaskMsg, 4)
_, err = client.RegisterAndSubscribe(ctx, taskCh, authMsg)
<-time.After(6 * time.Second)
_, err = client.RegisterAndSubscribe(ctx, make(chan *message.TaskMsg, 4), authMsg)
assert.Error(t, err)
assert.Equal(t, 0, rollerManager.GetNumberOfIdleRollers())
close(stopCh)
}
func testSeveralConnections(t *testing.T) {
@@ -164,6 +171,14 @@ func testSeveralConnections(t *testing.T) {
assert.NoError(t, migrate.ResetDB(l2db.GetDB().DB))
defer l2db.Close()
// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, cfg.DBConfig, 1, wsURL)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
}()
var (
batch = 100
eg = errgroup.Group{}
@@ -172,8 +187,7 @@ func testSeveralConnections(t *testing.T) {
for i := 0; i < batch; i++ {
idx := i
eg.Go(func() error {
rollers[idx] = newMockRoller(t, "roller_test"+strconv.Itoa(idx))
rollers[idx].connectToCoordinator(t, managerURL)
rollers[idx] = newMockRoller(t, "roller_test_"+strconv.Itoa(idx), wsURL)
return nil
})
}
@@ -183,8 +197,8 @@ func testSeveralConnections(t *testing.T) {
assert.Equal(t, batch, rollerManager.GetNumberOfIdleRollers())
// close connection
for i := 0; i < batch; i++ {
rollers[i].close()
for _, roller := range rollers {
roller.close()
}
var (
@@ -203,26 +217,37 @@ func testSeveralConnections(t *testing.T) {
}
}
}
func testIdleRollerSelection(t *testing.T) {
func testValidProof(t *testing.T) {
// Create db handler and reset db.
l2db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(l2db.GetDB().DB))
defer l2db.Close()
// create mock rollers.
batch := 20
rollers := make([]*mockRoller, batch)
for i := 0; i < batch; i++ {
rollers[i] = newMockRoller(t, "roller_test"+strconv.Itoa(i))
defer rollers[i].close()
rollers[i].connectToCoordinator(t, managerURL)
go rollers[i].waitTaskAndSendProof(t, 1, false)
}
assert.Equal(t, batch, rollerManager.GetNumberOfIdleRollers())
// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, cfg.DBConfig, 3, wsURL)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
}()
var ids = make([]string, 2)
// create mock rollers.
rollers := make([]*mockRoller, 3)
for i := 0; i < len(rollers); i++ {
rollers[i] = newMockRoller(t, "roller_test"+strconv.Itoa(i), wsURL)
// only roller 0 submits valid proof.
rollers[i].waitTaskAndSendProof(t, time.Second, false, i == 0)
}
defer func() {
// close connection
for _, roller := range rollers {
roller.close()
}
}()
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers())
var ids = make([]string, 1)
dbTx, err := l2db.Beginx()
assert.NoError(t, err)
for i := range ids {
@@ -252,13 +277,35 @@ func testIdleRollerSelection(t *testing.T) {
}
}
func testRollerReconnect(t *testing.T) {
func testInvalidProof(t *testing.T) {
// Create db handler and reset db.
l2db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(l2db.GetDB().DB))
defer l2db.Close()
// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, cfg.DBConfig, 3, wsURL)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
}()
// create mock rollers.
rollers := make([]*mockRoller, 3)
for i := 0; i < len(rollers); i++ {
rollers[i] = newMockRoller(t, "roller_test"+strconv.Itoa(i), wsURL)
rollers[i].waitTaskAndSendProof(t, time.Second, false, false)
}
defer func() {
// close connection
for _, roller := range rollers {
roller.close()
}
}()
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers())
var ids = make([]string, 1)
dbTx, err := l2db.Beginx()
assert.NoError(t, err)
@@ -269,16 +316,70 @@ func testRollerReconnect(t *testing.T) {
}
assert.NoError(t, dbTx.Commit())
// create mock roller
roller := newMockRoller(t, "roller_test")
defer roller.close()
roller.connectToCoordinator(t, managerURL)
go roller.waitTaskAndSendProof(t, 1, true)
// verify proof status
var (
tick = time.Tick(500 * time.Millisecond)
tickStop = time.Tick(10 * time.Second)
)
for len(ids) > 0 {
select {
case <-tick:
status, err := l2db.GetProvingStatusByID(ids[0])
assert.NoError(t, err)
if status == orm.ProvingTaskFailed {
ids = ids[1:]
}
case <-tickStop:
t.Error("failed to check proof status")
return
}
}
}
func testIdleRollerSelection(t *testing.T) {
// Create db handler and reset db.
l2db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(l2db.GetDB().DB))
defer l2db.Close()
// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, cfg.DBConfig, 1, wsURL)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
}()
// create mock rollers.
rollers := make([]*mockRoller, 20)
for i := 0; i < len(rollers); i++ {
rollers[i] = newMockRoller(t, "roller_test"+strconv.Itoa(i), wsURL)
rollers[i].waitTaskAndSendProof(t, time.Second, false, true)
}
defer func() {
// close connection
for _, roller := range rollers {
roller.close()
}
}()
assert.Equal(t, len(rollers), rollerManager.GetNumberOfIdleRollers())
var ids = make([]string, 2)
dbTx, err := l2db.Beginx()
assert.NoError(t, err)
for i := range ids {
ID, err := l2db.NewBatchInDBTx(dbTx, &orm.BlockInfo{Number: uint64(i)}, &orm.BlockInfo{Number: uint64(i)}, "0f", 1, 194676)
assert.NoError(t, err)
ids[i] = ID
}
assert.NoError(t, dbTx.Commit())
// verify proof status
var (
tick = time.Tick(500 * time.Millisecond)
tickStop = time.Tick(15 * time.Second)
tickStop = time.Tick(10 * time.Second)
)
for len(ids) > 0 {
select {
@@ -311,42 +412,46 @@ func testGracefulRestart(t *testing.T) {
}
assert.NoError(t, dbTx.Commit())
// create mock roller
roller := newMockRoller(t, "roller_test")
roller.connectToCoordinator(t, managerURL)
// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, cfg.DBConfig, 1, wsURL)
// wait 5 seconds, coordinator restarts before roller submits proof
go roller.waitTaskAndSendProof(t, 5, true)
// create mock roller
roller := newMockRoller(t, "roller_test", wsURL)
// wait 10 seconds, coordinator restarts before roller submits proof
roller.waitTaskAndSendProof(t, 10*time.Second, false, true)
// wait for coordinator to dispatch task
<-time.After(3 * time.Second)
<-time.After(5 * time.Second)
// the coordinator will delete the roller if the subscription is closed.
roller.close()
// start new roller manager && ws service
newRollerManager := setupRollerManager(t, cfg.DBConfig)
handle, _, err = utils.StartWSEndpoint(newManagerURL, newRollerManager.APIs())
assert.NoError(t, err)
// Close rollerManager and ws handler.
handler.Shutdown(context.Background())
rollerManager.Stop()
// Setup new coordinator and ws server.
newRollerManager, newHandler := setupCoordinator(t, cfg.DBConfig, 1, wsURL)
defer func() {
newHandler.Shutdown(context.Background())
newRollerManager.Stop()
handle.Shutdown(context.Background())
}()
for i := range ids {
_, err = newRollerManager.GetSessionInfo(ids[i])
info, err := newRollerManager.GetSessionInfo(ids[i])
assert.Equal(t, orm.ProvingTaskAssigned.String(), info.Status)
assert.NoError(t, err)
// at this point, roller haven't submitted
status, err := l2db.GetProvingStatusByID(ids[i])
assert.NoError(t, err)
assert.Equal(t, orm.ProvingTaskAssigned, status)
}
// will overwrite the roller client for `SubmitProof`
roller.connectToCoordinator(t, newManagerURL)
roller.waitTaskAndSendProof(t, time.Millisecond*500, true, true)
defer roller.close()
// at this point, roller haven't submitted
status, err := l2db.GetProvingStatusByID(ids[0])
assert.NoError(t, err)
assert.Equal(t, orm.ProvingTaskAssigned, status)
// verify proof status
var (
tick = time.Tick(500 * time.Millisecond)
@@ -370,13 +475,13 @@ func testGracefulRestart(t *testing.T) {
}
}
func setupRollerManager(t *testing.T, dbCfg *database.DBConfig) *coordinator.Manager {
func setupCoordinator(t *testing.T, dbCfg *database.DBConfig, rollersPerSession uint8, wsURL string) (rollerManager *coordinator.Manager, handler *http.Server) {
// Get db handler.
db, err := database.NewOrmFactory(dbCfg)
assert.True(t, assert.NoError(t, err), "failed to get db handler.")
rollerManager, err := coordinator.New(context.Background(), &coordinator_config.RollerManagerConfig{
RollersPerSession: 1,
rollerManager, err = coordinator.New(context.Background(), &coordinator_config.RollerManagerConfig{
RollersPerSession: rollersPerSession,
Verifier: &coordinator_config.VerifierConfig{MockMode: true},
CollectionTime: 1,
TokenTimeToLive: 5,
@@ -384,101 +489,136 @@ func setupRollerManager(t *testing.T, dbCfg *database.DBConfig) *coordinator.Man
assert.NoError(t, err)
assert.NoError(t, rollerManager.Start())
return rollerManager
// start ws service
handler, _, err = utils.StartWSEndpoint(strings.Split(wsURL, "//")[1], rollerManager.APIs())
assert.NoError(t, err)
return rollerManager, handler
}
type mockRoller struct {
rollerName string
privKey *ecdsa.PrivateKey
taskCh chan *message.TaskMsg
sub ethereum.Subscription
client *client2.Client
stopCh chan struct{}
wsURL string
client *client2.Client
taskCh chan *message.TaskMsg
taskCache sync.Map
sub ethereum.Subscription
stopCh chan struct{}
}
func newMockRoller(t *testing.T, rollerName string) *mockRoller {
func newMockRoller(t *testing.T, rollerName string, wsURL string) *mockRoller {
privKey, err := crypto.GenerateKey()
assert.NoError(t, err)
return &mockRoller{
roller := &mockRoller{
rollerName: rollerName,
privKey: privKey,
taskCh: make(chan *message.TaskMsg, 4)}
wsURL: wsURL,
taskCh: make(chan *message.TaskMsg, 4),
stopCh: make(chan struct{}),
}
roller.client, roller.sub, err = roller.connectToCoordinator()
assert.NoError(t, err)
return roller
}
// connectToCoordinator sets up a websocket client to connect to the roller manager.
func (r *mockRoller) connectToCoordinator(t *testing.T, wsURL string) {
// create a new ws connection
var err error
r.client, err = client2.Dial("ws://" + wsURL)
assert.NoError(t, err)
func (r *mockRoller) connectToCoordinator() (*client2.Client, ethereum.Subscription, error) {
// Create connection.
client, err := client2.Dial(r.wsURL)
if err != nil {
return nil, nil, err
}
// create a new ws connection
authMsg := &message.AuthMsg{
Identity: &message.Identity{
Name: r.rollerName,
Timestamp: time.Now().UnixNano(),
Timestamp: uint32(time.Now().Unix()),
},
}
assert.NoError(t, authMsg.Sign(r.privKey))
_ = authMsg.Sign(r.privKey)
token, err := r.client.RequestToken(context.Background(), authMsg)
assert.NoError(t, err)
token, err := client.RequestToken(context.Background(), authMsg)
if err != nil {
return nil, nil, err
}
authMsg.Identity.Token = token
_ = authMsg.Sign(r.privKey)
assert.NoError(t, authMsg.Sign(r.privKey))
r.sub, err = r.client.RegisterAndSubscribe(context.Background(), r.taskCh, authMsg)
assert.NoError(t, err)
sub, err := client.RegisterAndSubscribe(context.Background(), r.taskCh, authMsg)
if err != nil {
return nil, nil, err
}
r.stopCh = make(chan struct{})
return client, sub, nil
}
go func() {
<-r.stopCh
r.sub.Unsubscribe()
}()
func (r *mockRoller) releaseTasks() {
r.taskCache.Range(func(key, value any) bool {
r.taskCh <- value.(*message.TaskMsg)
r.taskCache.Delete(key)
return true
})
}
// Wait for the proof task, after receiving the proof task, roller submits proof after proofTime secs.
func (r *mockRoller) waitTaskAndSendProof(t *testing.T, proofTime time.Duration, reconnectBeforeSendProof bool) {
for {
task := <-r.taskCh
// simulate proof time
<-time.After(proofTime * time.Second)
if reconnectBeforeSendProof {
// simulating the case that the roller first disconnects and then reconnects to the coordinator
// the Subscription and its `Err()` channel will be closed, and the coordinator will `freeRoller()`
r.reconnetToCoordinator(t)
func (r *mockRoller) waitTaskAndSendProof(t *testing.T, proofTime time.Duration, reconnect bool, validProof bool) {
// simulating the case that the roller first disconnects and then reconnects to the coordinator
// the Subscription and its `Err()` channel will be closed, and the coordinator will `freeRoller()`
if reconnect {
var err error
r.client, r.sub, err = r.connectToCoordinator()
if err != nil {
t.Fatal(err)
return
}
proof := &message.ProofMsg{
ProofDetail: &message.ProofDetail{
ID: task.ID,
Status: message.StatusOk,
Proof: &message.AggProof{},
},
}
assert.NoError(t, proof.Sign(r.privKey))
ok, err := r.client.SubmitProof(context.Background(), proof)
assert.NoError(t, err)
assert.Equal(t, true, ok)
}
// Release cached tasks.
r.releaseTasks()
r.stopCh = make(chan struct{})
go r.loop(t, r.client, proofTime, validProof, r.stopCh)
}
func (r *mockRoller) reconnetToCoordinator(t *testing.T) {
authMsg := &message.AuthMsg{
Identity: &message.Identity{
Name: r.rollerName,
Timestamp: time.Now().UnixNano(),
},
func (r *mockRoller) loop(t *testing.T, client *client2.Client, proofTime time.Duration, validProof bool, stopCh chan struct{}) {
for {
select {
case task := <-r.taskCh:
r.taskCache.Store(task.ID, task)
// simulate proof time
select {
case <-time.After(proofTime):
case <-stopCh:
return
}
proof := &message.ProofMsg{
ProofDetail: &message.ProofDetail{
ID: task.ID,
Status: message.StatusOk,
Proof: &message.AggProof{},
},
}
if !validProof {
proof.Status = message.StatusProofError
}
assert.NoError(t, proof.Sign(r.privKey))
ok, err := client.SubmitProof(context.Background(), proof)
assert.NoError(t, err)
assert.Equal(t, true, ok)
case <-stopCh:
return
}
}
assert.NoError(t, authMsg.Sign(r.privKey))
token, err := r.client.RequestToken(context.Background(), authMsg)
assert.NoError(t, err)
authMsg.Identity.Token = token
assert.NoError(t, authMsg.Sign(r.privKey))
r.sub, err = r.client.RegisterAndSubscribe(context.Background(), r.taskCh, authMsg)
assert.NoError(t, err)
}
func (r *mockRoller) close() {
close(r.stopCh)
r.sub.Unsubscribe()
}
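
The mock roller's `taskCache`/`releaseTasks` pair keeps tasks received before a reconnect and pushes them back onto the channel afterwards so none are lost; a self-contained sketch of that cache-and-requeue idea (types simplified):

```go
// Self-contained sketch of the cache-and-requeue pattern used by the mock roller:
// tasks are remembered in a sync.Map when received, and releaseTasks pushes any
// still-cached task back onto the channel (e.g. after a reconnect).
package main

import (
	"fmt"
	"sync"
)

type task struct{ ID string }

type roller struct {
	taskCh    chan *task
	taskCache sync.Map
}

func (r *roller) receive(t *task) { r.taskCache.Store(t.ID, t) }

func (r *roller) releaseTasks() {
	r.taskCache.Range(func(key, value any) bool {
		r.taskCh <- value.(*task)
		r.taskCache.Delete(key)
		return true
	})
}

func main() {
	r := &roller{taskCh: make(chan *task, 4)}
	r.receive(&task{ID: "batch-1"})
	r.releaseTasks()
	fmt.Println("requeued:", (<-r.taskCh).ID)
}
```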

View File

@@ -41,7 +41,7 @@ func (r *rollerNode) sendTask(id string, traces []*types.BlockTrace) bool {
}:
r.TaskIDs.Set(id, struct{}{})
default:
log.Warn("roller channel is full")
log.Warn("roller channel is full", "roller name", r.Name, "public key", r.PublicKey)
return false
}
return true
@@ -77,6 +77,7 @@ func (m *Manager) register(pubkey string, identity *message.Identity) (<-chan *m
roller := node.(*rollerNode)
// avoid reconnection too frequently.
if time.Since(roller.registerTime) < 60 {
log.Warn("roller reconnect too frequently", "roller_name", identity.Name, "public key", pubkey)
return nil, fmt.Errorf("roller reconnect too frequently")
}
// update register time and status
@@ -105,17 +106,18 @@ func (m *Manager) freeTaskIDForRoller(pk string, id string) {
}
// GetNumberOfIdleRollers return the count of idle rollers.
func (m *Manager) GetNumberOfIdleRollers() int {
pubkeys := m.rollerPool.Keys()
for i := 0; i < len(pubkeys); i++ {
if val, ok := m.rollerPool.Get(pubkeys[i]); ok {
func (m *Manager) GetNumberOfIdleRollers() (count int) {
for i, pk := range m.rollerPool.Keys() {
if val, ok := m.rollerPool.Get(pk); ok {
r := val.(*rollerNode)
if r.TaskIDs.Count() > 0 {
pubkeys[i], pubkeys = pubkeys[len(pubkeys)-1], pubkeys[:len(pubkeys)-1]
if r.TaskIDs.Count() == 0 {
count++
}
} else {
log.Error("rollerPool Get fail", "pk", pk, "idx", i, "pk len", pk)
}
}
return len(pubkeys)
return count
}
func (m *Manager) selectRoller() *rollerNode {
@@ -127,6 +129,8 @@ func (m *Manager) selectRoller() *rollerNode {
if r.TaskIDs.Count() == 0 {
return r
}
} else {
log.Error("rollerPool Get fail", "pk", pubkeys[idx.Int64()], "idx", idx.Int64(), "pk len", len(pubkeys))
}
pubkeys[idx.Int64()], pubkeys = pubkeys[0], pubkeys[1:]
}
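
A tiny sketch of the counting approach `GetNumberOfIdleRollers` switches to. The old in-place swap-and-truncate advanced the loop index past the element swapped into the current slot, so a busy roller swapped in from the end could still be counted as idle; counting directly avoids that (simplified here to a plain map):

```go
// Simplified sketch of the fixed counting logic: count entries with zero
// assigned tasks instead of pruning busy entries from the key slice in place.
package main

import "fmt"

func countIdle(assignedTasks map[string]int) (count int) {
	for _, n := range assignedTasks {
		if n == 0 {
			count++
		}
	}
	return count
}

func main() {
	fmt.Println(countIdle(map[string]int{"roller-a": 0, "roller-b": 2, "roller-c": 0})) // prints 2
}
```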

View File

@@ -0,0 +1,23 @@
//go:build mock_verifier
package verifier
import (
"scroll-tech/common/message"
"scroll-tech/coordinator/config"
)
// Verifier represents a mock halo2 verifier.
type Verifier struct {
}
// NewVerifier Sets up a mock verifier.
func NewVerifier(_ *config.VerifierConfig) (*Verifier, error) {
return &Verifier{}, nil
}
// VerifyProof always return true
func (v *Verifier) VerifyProof(proof *message.AggProof) (bool, error) {
return true, nil
}
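
Together with the `//go:build !mock_verifier` tag added to the real verifier in the next file, this makes mock verification a compile-time choice, presumably selected with Go's standard build-tag flag (e.g. `go test -tags mock_verifier ./verifier`), rather than the `verifier.mock` CLI flag removed from the coordinator's flags.go above.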

View File

@@ -1,3 +1,5 @@
//go:build !mock_verifier
package verifier
/*

View File

@@ -29,14 +29,3 @@ db_cli rollback
```bash
make test
```
## db config
* db settings in config
```bash
# DB_DSN: db data source name
export DB_DSN="postgres://admin:123456@localhost/test_db?sslmode=disable"
# DB_DRIVER: db driver name
export DB_DRIVER="postgres"
```

database/cmd/app/app.go (new file, 80 lines)
View File

@@ -0,0 +1,80 @@
package app
import (
"fmt"
"os"
"github.com/urfave/cli/v2"
"scroll-tech/common/utils"
"scroll-tech/common/version"
)
var (
// Set up database app info.
app *cli.App
)
func init() {
app = cli.NewApp()
// Set up database app info.
app.Name = "db_cli"
app.Usage = "The Scroll Database CLI"
app.Version = version.Version
app.Flags = append(app.Flags, utils.CommonFlags...)
app.Before = func(ctx *cli.Context) error {
return utils.LogSetup(ctx)
}
app.Commands = []*cli.Command{
{
Name: "reset",
Usage: "Clean and reset database.",
Action: resetDB,
Flags: []cli.Flag{&utils.ConfigFileFlag},
},
{
Name: "status",
Usage: "Check migration status.",
Action: checkDBStatus,
Flags: []cli.Flag{&utils.ConfigFileFlag},
},
{
Name: "version",
Usage: "Display the current database version.",
Action: dbVersion,
Flags: []cli.Flag{&utils.ConfigFileFlag},
},
{
Name: "migrate",
Usage: "Migrate the database to the latest version.",
Action: migrateDB,
Flags: []cli.Flag{&utils.ConfigFileFlag},
},
{
Name: "rollback",
Usage: "Roll back the database to a previous <version>. Rolls back a single migration if no version specified.",
Action: rollbackDB,
Flags: []cli.Flag{
&utils.ConfigFileFlag,
&cli.IntFlag{
Name: "version",
Usage: "Rollback to the specified version.",
Value: 0,
}},
},
}
// Register `db_cli-test` app for integration-test.
utils.RegisterSimulation(app, "db_cli-test")
}
// Run run database cmd instance.
func Run() {
// RunApp the db_cli.
if err := app.Run(os.Args); err != nil {
_, _ = fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
}

View File

@@ -0,0 +1,19 @@
package app
import (
"fmt"
"testing"
"time"
"scroll-tech/common/cmd"
"scroll-tech/common/version"
)
func TestRunDatabase(t *testing.T) {
bridge := cmd.NewCmd(t, "db_cli-test", "--version")
defer bridge.WaitExit()
// wait result
bridge.ExpectWithTimeout(true, time.Second*3, fmt.Sprintf("db_cli version %s", version.Version))
bridge.RunApp(nil)
}

Some files were not shown because too many files have changed in this diff.