Compare commits

...

52 Commits

Author SHA1 Message Date
maskpp
88703e113e Add fast cache. 2023-01-31 14:10:01 +08:00
HAOYUatHZ
346c713d27 feat(redis): support ClusterClient (#268) 2023-01-30 10:48:54 +08:00
maskpp
c8c063b622 fix(docker redis): Fix redis docker randomly panic. (#267) 2023-01-29 20:46:40 +08:00
maskpp
b5549f6301 feat(cache and trace api): Add redis cache in database and add trace api in bridge.l2backend. (#228)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
2023-01-29 09:56:25 +08:00
Péter Garamvölgyi
16576b6f53 fix(bridge): Handle duplicate messages (#261) 2023-01-20 12:09:47 +01:00
HAOYUatHZ
aa885f068f docs(db): fix sql comments (#260) 2023-01-20 09:44:19 +08:00
HAOYUatHZ
1f764a579d refactor(bridge): refactor layer2MessageOrm.GetL2Messages() (#243) 2023-01-20 09:29:34 +08:00
HAOYUatHZ
91ee767669 doc(db): update sql comment (#259) 2023-01-20 09:27:50 +08:00
Péter Garamvölgyi
7eac41691e feat(bridge): handle expired messages correctly (#257) 2023-01-19 23:53:34 +01:00
Péter Garamvölgyi
d9516890b0 feat(bridge): handle expired messages (#256) 2023-01-19 23:21:17 +01:00
Péter Garamvölgyi
ddb96bb732 feat(bridge): add more l1 relayer logs (#255) 2023-01-19 22:37:02 +01:00
Péter Garamvölgyi
e419dd8d5c fix(bridge): add limit to GetL1MessagesByStatus (#254) 2023-01-19 22:19:48 +01:00
Péter Garamvölgyi
c99c65bdfd fix(bridge): execute watcher loops independently (#253) 2023-01-19 21:14:45 +01:00
colin
18fd7f56a8 fix(coordinator): reset roller state when proof session timeout (#210)
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-01-19 21:37:30 +08:00
Péter Garamvölgyi
a319dc1cff bugfix(bridge): only relay messages for finalized batches (#251)
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
2023-01-19 20:54:09 +08:00
colin
52bf3a55fc fix(coordinator): fix CollectProofs for multi-roller & add tests (#252)
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
2023-01-19 14:52:20 +08:00
Péter Garamvölgyi
598e10e4fc feat(bridge): update skipped batches in a single db operation (#242) 2023-01-18 15:21:41 +01:00
maskpp
eed3f42731 fix(coordinator): Fix bug in coordinator.GetNumberOfIdleRollers function. (#247)
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
2023-01-18 20:51:19 +08:00
colin
5a4bea8ccd fix(coordinator): set session failed when all rollers have submitted invalid proof (#241)
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-01-18 20:01:43 +08:00
ChuhanJin
5b37b63d89 build(jenkinsfile): replace status_check with comment in coverage test (#249)
Co-authored-by: vincent <419436363@qq.com>
2023-01-18 19:44:47 +08:00
HAOYUatHZ
5e5c4f7701 build(roller&coordinator): fix Makefile (#245)
Co-authored-by: Lawliet-Chan <1576710154@qq.com>
2023-01-18 16:51:55 +08:00
HAOYUatHZ
09dc638652 perf(db): add limit in block_batch queries (#240) 2023-01-17 20:46:51 +08:00
HAOYUatHZ
b598a01e7f build(jenkins): remove changeset condition (#239) 2023-01-17 07:07:58 +08:00
Scroll Dev
0fcdb6f824 doc: bump version number (#238) 2023-01-17 06:57:43 +08:00
Xi Lin
5a95dcf5ba bugfix(bridge&database): fix compute message hash and add unit tests (#233)
Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
2023-01-17 06:56:21 +08:00
Xi Lin
d0c63e75df bugfix(database): make sure return order of statuses in GetRollupStatusByIDList (#235)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2023-01-16 20:57:27 +08:00
Scroll Dev
676b8a2230 Update pull_request_template.md 2023-01-15 07:46:57 +08:00
Xi Lin
a1cb3d3b87 fix(contracts&libzkp): fix mpt circuits panic (#232)
Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
Co-authored-by: Lawliet-Chan <1576710154@qq.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-01-14 14:58:37 +08:00
colin
47b4c54e05 fix(relayer): use sync map for concurrent write (#231)
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
2023-01-14 12:07:59 +08:00
maskpp
fe822a65b9 refactor(bridge): refactor bridge parallelization (#213)
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
2023-01-14 09:40:07 +08:00
HAOYUatHZ
9bd4931f93 chore: fix typos in pull_request template (#230) 2023-01-13 11:50:39 +08:00
HAOYUatHZ
411cb19b62 chore: add pull_request template (#229) 2023-01-13 11:46:19 +08:00
HAOYUatHZ
8f55299941 Revert "refactor(coordinator): remove debug api namespace from manager" (#224)
Co-authored-by: Nazarii Denha <dengaaa2002@gmail.com>
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2023-01-13 08:41:36 +08:00
HAOYUatHZ
0bdcce79ba chore: bump version number (#226) 2023-01-13 08:33:53 +08:00
Péter Garamvölgyi
fcd29c305d fix(coordinator): use uint32 for timestamp to enable RLP encoding (#225) 2023-01-12 18:24:59 +01:00
Lawliet-Chan
54a6ab472a feat(message): replace json.Marshal with rlp.Encode for Signing (#219) 2023-01-12 22:21:59 +08:00
HAOYUatHZ
b2a5baa2ad feat(bridge): upgrade TraceGasCost estimation (#222) 2023-01-12 16:45:11 +08:00
Lawliet-Chan
dc6b71ca23 chore: improve golang lint rules (#172)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-01-12 12:55:00 +08:00
Nazarii Denha
e1247a7eb2 refactor(coordinator): remove debug api namespace from manager (#221)
Co-authored-by: colin <102356659+colinlyguo@users.noreply.github.com>
2023-01-12 10:16:48 +08:00
colin
65699b89bb feat(coordinator): support rollers-per-session (#215)
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
Co-authored-by: maskpp <maskpp266@gmail.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-01-11 21:49:37 +08:00
maskpp
a44956a05f fix(bug): fix data race in common/cmd module. (#220) 2023-01-11 21:05:46 +08:00
maskpp
b85b4bafc2 refactor(coordinator): refactor CollectProofs func (#211)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-01-11 13:31:42 +08:00
colin
2e3c80c580 feat(roller): add submit proof retry (#217)
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
Co-authored-by: Lawliet-Chan <1576710154@qq.com>
2023-01-11 12:30:33 +08:00
Péter Garamvölgyi
d24392feac feat(bridge): Propose batches, fetch blocks and events in parallel (#216) 2023-01-10 23:13:06 +08:00
ChuhanJin
5c6e20a774 build(Jenkins): add docker push job for tag (#214)
Co-authored-by: vincent <419436363@qq.com>
2023-01-10 14:43:20 +08:00
HAOYUatHZ
9f9e23ff0e chore(coordinator): add more logs (#177)
Co-authored-by: Lawliet-Chan <1576710154@qq.com>
2023-01-10 09:29:43 +08:00
Péter Garamvölgyi
fa93de97de feat(bridge): Relay messages and batches in parallel (#212)
Co-authored-by: HAOYUatHZ <haoyu@protonmail.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-01-10 09:18:36 +08:00
HAOYUatHZ
deedf7a5d0 refactor(coordinator): refactor CollectProofs (#208) 2023-01-10 08:49:14 +08:00
Xi Lin
73432127cd feat(contract): only use blockHeight to verify relay message in layer1 (#209) 2023-01-09 15:03:30 +08:00
colin
a78160ddad fix(roller): avoid discarding task when connection failure (#203)
Co-authored-by: colinlyguo <colinlyguo@gmail.com>
Co-authored-by: Lawliet-Chan <1576710154@qq.com>
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-01-09 10:52:50 +08:00
Lawliet-Chan
fff2517a76 build(libzkp): update dependency common-rs -> scroll-zkevm (#204)
Co-authored-by: HAOYUatHZ <37070449+HAOYUatHZ@users.noreply.github.com>
2023-01-09 09:32:14 +08:00
Xi Lin
eba7647e21 bugfix(bridge): add suffix to id in processingFinalization and processingCommitment (#207)
Co-authored-by: Péter Garamvölgyi <peter@scroll.io>
2023-01-06 14:33:21 +01:00
94 changed files with 2489 additions and 923 deletions

7
.github/pull_request_template.md vendored Normal file
View File

@@ -0,0 +1,7 @@
1. Purpose or design rationale of this PR
2. Does this PR involve a new deployment and a new git tag & docker image tag? If so, has `tag` in `common/version.go` been updated?
3. Is this PR a breaking change? If so, has a `breaking-change` label been attached?

38
Jenkinsfile vendored
View File

@@ -17,18 +17,6 @@ pipeline {
}
stages {
stage('Build') {
when {
anyOf {
changeset "Jenkinsfile"
changeset "build/**"
changeset "go.work**"
changeset "bridge/**"
changeset "coordinator/**"
changeset "common/**"
changeset "database/**"
changeset "tests/**"
}
}
parallel {
stage('Build Prerequisite') {
steps {
@@ -70,18 +58,6 @@ pipeline {
}
}
stage('Parallel Test') {
when {
anyOf {
changeset "Jenkinsfile"
changeset "build/**"
changeset "go.work**"
changeset "bridge/**"
changeset "coordinator/**"
changeset "common/**"
changeset "database/**"
changeset "tests/**"
}
}
parallel{
stage('Test bridge package') {
steps {
@@ -126,24 +102,12 @@ pipeline {
}
}
stage('Compare Coverage') {
when {
anyOf {
changeset "Jenkinsfile"
changeset "build/**"
changeset "go.work**"
changeset "bridge/**"
changeset "coordinator/**"
changeset "common/**"
changeset "database/**"
changeset "tests/**"
}
}
steps {
sh "./build/post-test-report-coverage.sh"
script {
currentBuild.result = 'SUCCESS'
}
step([$class: 'CompareCoverageAction', publishResultAs: 'statusCheck', scmVars: [GIT_URL: env.GIT_URL]])
step([$class: 'CompareCoverageAction', publishResultAs: 'Comment', scmVars: [GIT_URL: env.GIT_URL]])
}
}
}

View File

@@ -9,20 +9,20 @@ import (
)
const (
// SENT_MESSAGE_EVENT_SIGNATURE = keccak256("SentMessage(address,address,uint256,uint256,uint256,bytes,uint256,uint256)")
SENT_MESSAGE_EVENT_SIGNATURE = "806b28931bc6fbe6c146babfb83d5c2b47e971edb43b4566f010577a0ee7d9f4"
// SentMessageEventSignature = keccak256("SentMessage(address,address,uint256,uint256,uint256,bytes,uint256,uint256)")
SentMessageEventSignature = "806b28931bc6fbe6c146babfb83d5c2b47e971edb43b4566f010577a0ee7d9f4"
// RELAYED_MESSAGE_EVENT_SIGNATURE = keccak256("RelayedMessage(bytes32)")
RELAYED_MESSAGE_EVENT_SIGNATURE = "4641df4a962071e12719d8c8c8e5ac7fc4d97b927346a3d7a335b1f7517e133c"
// RelayedMessageEventSignature = keccak256("RelayedMessage(bytes32)")
RelayedMessageEventSignature = "4641df4a962071e12719d8c8c8e5ac7fc4d97b927346a3d7a335b1f7517e133c"
// FAILED_RELAYED_MESSAGE_EVENT_SIGNATURE = keccak256("FailedRelayedMessage(bytes32)")
FAILED_RELAYED_MESSAGE_EVENT_SIGNATURE = "99d0e048484baa1b1540b1367cb128acd7ab2946d1ed91ec10e3c85e4bf51b8f"
// FailedRelayedMessageEventSignature = keccak256("FailedRelayedMessage(bytes32)")
FailedRelayedMessageEventSignature = "99d0e048484baa1b1540b1367cb128acd7ab2946d1ed91ec10e3c85e4bf51b8f"
// COMMIT_BATCH_EVENT_SIGNATURE = keccak256("CommitBatch(bytes32,bytes32,uint256,bytes32)")
COMMIT_BATCH_EVENT_SIGNATURE = "a26d4bd91c4c2eff3b1bf542129607d782506fc1950acfab1472a20d28c06596"
// CommitBatchEventSignature = keccak256("CommitBatch(bytes32,bytes32,uint256,bytes32)")
CommitBatchEventSignature = "a26d4bd91c4c2eff3b1bf542129607d782506fc1950acfab1472a20d28c06596"
// FINALIZED_BATCH_EVENT_SIGNATURE = keccak256("FinalizeBatch(bytes32,bytes32,uint256,bytes32)")
FINALIZED_BATCH_EVENT_SIGNATURE = "e20f311a96205960de4d2bb351f7729e5136fa36ae64d7f736c67ddc4ca4cd4b"
// FinalizedBatchEventSignature = keccak256("FinalizeBatch(bytes32,bytes32,uint256,bytes32)")
FinalizedBatchEventSignature = "e20f311a96205960de4d2bb351f7729e5136fa36ae64d7f736c67ddc4ca4cd4b"
)
var (

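The renamed constants above are the keccak256 hashes of the bridge contract's event signatures (the comments show the exact signature strings); only the Go naming style changes, not the values. A minimal sketch of how such a hash is derived, assuming the scroll-tech go-ethereum fork keeps upstream's crypto.Keccak256Hash helper:

```go
package main

import (
	"fmt"

	"github.com/scroll-tech/go-ethereum/crypto"
)

func main() {
	// keccak256 of the canonical event signature gives the value the watchers
	// filter on in topic position 0; per the comment in the diff above it
	// should print the SentMessageEventSignature constant (0x-prefixed).
	sig := "SentMessage(address,address,uint256,uint256,uint256,bytes,uint256,uint256)"
	fmt.Println(crypto.Keccak256Hash([]byte(sig)).Hex())
}
```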
View File

@@ -15,5 +15,5 @@ func TestRunBridge(t *testing.T) {
// wait result
bridge.ExpectWithTimeout(true, time.Second*3, fmt.Sprintf("bridge version %s", version.Version))
bridge.RunApp(false)
bridge.RunApp(nil)
}

View File

@@ -61,7 +61,15 @@
}
},
"db_config": {
"driver_name": "postgres",
"dsn": "postgres://admin:123456@localhost/test?sslmode=disable"
"persistence": {
"driver_name": "postgres",
"dsn": "postgres://admin:123456@localhost/test?sslmode=disable"
},
"redis": {
"url": "redis://default:@localhost:6379/0",
"expirations": {
"trace": 3600
}
}
}
}
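This config change splits db_config into a persistence block (the existing Postgres DSN) and a new redis block with per-key expirations in seconds, matching the "Add redis cache in database" commit. The concrete Go types are not part of this compare view; a hypothetical struct that would unmarshal the JSON above might look like:

```go
package config

import "encoding/json"

// DBConfig mirrors the "db_config" object shown in the diff. Field names are
// taken from the JSON keys; the concrete types in scroll-tech/scroll's
// database package may differ.
type DBConfig struct {
	Persistence struct {
		DriverName string `json:"driver_name"`
		DSN        string `json:"dsn"`
	} `json:"persistence"`
	Redis struct {
		URL         string           `json:"url"`
		Expirations map[string]int64 `json:"expirations"` // seconds, e.g. "trace": 3600
	} `json:"redis"`
}

// ParseDBConfig decodes a "db_config" section like the one above.
func ParseDBConfig(raw []byte) (*DBConfig, error) {
	cfg := new(DBConfig)
	if err := json.Unmarshal(raw, cfg); err != nil {
		return nil, err
	}
	return cfg, nil
}
```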

View File

@@ -5,15 +5,16 @@ go 1.18
require (
github.com/iden3/go-iden3-crypto v0.0.13
github.com/orcaman/concurrent-map v1.0.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257
github.com/stretchr/testify v1.8.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20230113082126-cdaea939622e
github.com/stretchr/testify v1.8.1
github.com/urfave/cli/v2 v2.10.2
golang.org/x/sync v0.1.0
modernc.org/mathutil v1.4.1
)
require (
github.com/btcsuite/btcd v0.20.1-beta // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set v1.8.0 // indirect
@@ -27,6 +28,7 @@ require (
github.com/mattn/go-isatty v0.0.14 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 // indirect
github.com/rjeczalik/notify v0.9.1 // indirect
github.com/rogpeppe/go-internal v1.8.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
@@ -36,8 +38,8 @@ require (
github.com/tklauser/numcpus v0.4.0 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/crypto v0.4.0 // indirect
golang.org/x/sys v0.3.0 // indirect
golang.org/x/crypto v0.5.0 // indirect
golang.org/x/sys v0.4.0 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

View File

@@ -73,8 +73,8 @@ github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
@@ -336,6 +336,8 @@ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0/go.mod h1:qqbHyh8v60DhA7CoWK5oRCqLrMHRGoxYCSS9EjAz6Eo=
github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc=
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
@@ -348,8 +350,8 @@ github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257 h1:FjBC0Ww42WRoiB5EQFxoIEcJqoEUw2twdhN9nGkVCQA=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230113082126-cdaea939622e h1:TAqAeQiQI6b+TRyqyQ6qhizqY35LhqYe8lWhG0nNRGw=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230113082126-cdaea939622e/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/zktrie v0.3.0/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
github.com/scroll-tech/zktrie v0.3.1 h1:HlR+fMBdjXX1/7cUMqpUgGEhGy/3vN1JpwQ0ovg/Ys8=
github.com/scroll-tech/zktrie v0.3.1/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
@@ -372,6 +374,7 @@ github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZL
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
@@ -379,8 +382,9 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
@@ -423,8 +427,8 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8=
golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80=
golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -539,8 +543,8 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -552,7 +556,7 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM=
golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -662,5 +666,7 @@ honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWh
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg=
honnef.co/go/tools v0.1.3/go.mod h1:NgwopIslSNH47DimFoV78dnkksY2EFtX0ajyb3K/las=
modernc.org/mathutil v1.4.1 h1:ij3fYGe8zBF4Vu+g0oT7mB06r8sqGWKuJu1yXeR4by8=
modernc.org/mathutil v1.4.1/go.mod h1:mZW8CKdRPY1v87qxC/wUdX5O1qDzXMP5TH3wjfpga6E=
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=

View File

@@ -37,8 +37,8 @@ func setupEnv(t *testing.T) {
cfg.L2Config.Endpoint = l2gethImg.Endpoint()
// Create db container.
dbImg = docker.NewTestDBDocker(t, cfg.DBConfig.DriverName)
cfg.DBConfig.DSN = dbImg.Endpoint()
dbImg = docker.NewTestDBDocker(t, cfg.DBConfig.Persistence.DriverName)
cfg.DBConfig.Persistence.DSN = dbImg.Endpoint()
}
func free(t *testing.T) {

View File

@@ -72,15 +72,20 @@ func NewLayer1Relayer(ctx context.Context, ethClient *ethclient.Client, l1Confir
// ProcessSavedEvents relays saved un-processed cross-domain transactions to desired blockchain
func (r *Layer1Relayer) ProcessSavedEvents() {
// msgs are sorted by nonce in increasing order
msgs, err := r.db.GetL1MessagesByStatus(orm.MsgPending)
msgs, err := r.db.GetL1MessagesByStatus(orm.MsgPending, 100)
if err != nil {
log.Error("Failed to fetch unprocessed L1 messages", "err", err)
return
}
if len(msgs) > 0 {
log.Info("Processing L1 messages", "count", len(msgs))
}
for _, msg := range msgs {
if err = r.processSavedEvent(msg); err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
log.Error("failed to process event", "err", err)
log.Error("failed to process event", "msg.msgHash", msg.MsgHash, "err", err)
}
return
}
@@ -109,6 +114,12 @@ func (r *Layer1Relayer) processSavedEvent(msg *orm.L1Message) error {
}
hash, err := r.sender.SendTransaction(msg.MsgHash, &r.cfg.MessengerContractAddress, big.NewInt(0), data)
if err != nil && err.Error() == "execution reverted: Message expired" {
return r.db.UpdateLayer1Status(r.ctx, msg.MsgHash, orm.MsgExpired)
}
if err != nil && err.Error() == "execution reverted: Message successfully executed" {
return r.db.UpdateLayer1Status(r.ctx, msg.MsgHash, orm.MsgConfirmed)
}
if err != nil {
return err
}
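The relayer now treats two specific revert reasons from SendTransaction as terminal: an expired message is marked MsgExpired and an already-executed one MsgConfirmed, so neither is retried. An illustrative helper (not part of the diff) that factors out that string matching, using only names visible in the hunk above:

```go
// markTerminalRevert is an illustrative helper, not part of the diff: it maps
// the two revert reasons handled in processSavedEvent to terminal statuses and
// reports whether the error was consumed (i.e. the message should not be retried).
func (r *Layer1Relayer) markTerminalRevert(msgHash string, err error) (bool, error) {
	switch {
	case err == nil:
		return false, nil
	case err.Error() == "execution reverted: Message expired":
		return true, r.db.UpdateLayer1Status(r.ctx, msgHash, orm.MsgExpired)
	case err.Error() == "execution reverted: Message successfully executed":
		return true, r.db.UpdateLayer1Status(r.ctx, msgHash, orm.MsgConfirmed)
	default:
		return false, nil
	}
}
```

With such a helper, the two `err.Error() == ...` branches collapse into a single call followed by the generic error path.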

View File

@@ -111,7 +111,7 @@ func (w *Watcher) Stop() {
const contractEventsBlocksFetchLimit = int64(10)
// FetchContractEvent pull latest event logs from given contract address and save in DB
// FetchContractEvent pull latest event logs from given contract address and save in Persistence
func (w *Watcher) FetchContractEvent(blockHeight uint64) error {
defer func() {
log.Info("l1 watcher fetchContractEvent", "w.processedMsgHeight", w.processedMsgHeight)
@@ -138,11 +138,11 @@ func (w *Watcher) FetchContractEvent(blockHeight uint64) error {
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 5)
query.Topics[0][0] = common.HexToHash(bridge_abi.SENT_MESSAGE_EVENT_SIGNATURE)
query.Topics[0][1] = common.HexToHash(bridge_abi.RELAYED_MESSAGE_EVENT_SIGNATURE)
query.Topics[0][2] = common.HexToHash(bridge_abi.FAILED_RELAYED_MESSAGE_EVENT_SIGNATURE)
query.Topics[0][3] = common.HexToHash(bridge_abi.COMMIT_BATCH_EVENT_SIGNATURE)
query.Topics[0][4] = common.HexToHash(bridge_abi.FINALIZED_BATCH_EVENT_SIGNATURE)
query.Topics[0][0] = common.HexToHash(bridge_abi.SentMessageEventSignature)
query.Topics[0][1] = common.HexToHash(bridge_abi.RelayedMessageEventSignature)
query.Topics[0][2] = common.HexToHash(bridge_abi.FailedRelayedMessageEventSignature)
query.Topics[0][3] = common.HexToHash(bridge_abi.CommitBatchEventSignature)
query.Topics[0][4] = common.HexToHash(bridge_abi.FinalizedBatchEventSignature)
logs, err := w.client.FilterLogs(w.ctx, query)
if err != nil {
@@ -199,10 +199,10 @@ func (w *Watcher) FetchContractEvent(blockHeight uint64) error {
for _, msg := range relayedMessageEvents {
if msg.isSuccessful {
// succeed
err = w.db.UpdateLayer1StatusAndLayer2Hash(w.ctx, msg.msgHash.String(), orm.MsgConfirmed, msg.txHash.String())
err = w.db.UpdateLayer2StatusAndLayer1Hash(w.ctx, msg.msgHash.String(), orm.MsgConfirmed, msg.txHash.String())
} else {
// failed
err = w.db.UpdateLayer1StatusAndLayer2Hash(w.ctx, msg.msgHash.String(), orm.MsgFailed, msg.txHash.String())
err = w.db.UpdateLayer2StatusAndLayer1Hash(w.ctx, msg.msgHash.String(), orm.MsgFailed, msg.txHash.String())
}
if err != nil {
log.Error("Failed to update layer1 status and layer2 hash", "err", err)
@@ -229,7 +229,7 @@ func (w *Watcher) parseBridgeEventLogs(logs []types.Log) ([]*orm.L1Message, []re
var rollupEvents []rollupEvent
for _, vLog := range logs {
switch vLog.Topics[0] {
case common.HexToHash(bridge_abi.SENT_MESSAGE_EVENT_SIGNATURE):
case common.HexToHash(bridge_abi.SentMessageEventSignature):
event := struct {
Target common.Address
Sender common.Address
@@ -250,7 +250,7 @@ func (w *Watcher) parseBridgeEventLogs(logs []types.Log) ([]*orm.L1Message, []re
event.Target = common.HexToAddress(vLog.Topics[1].String())
l1Messages = append(l1Messages, &orm.L1Message{
Nonce: event.MessageNonce.Uint64(),
MsgHash: utils.ComputeMessageHash(event.Target, event.Sender, event.Value, event.Fee, event.Deadline, event.Message, event.MessageNonce).String(),
MsgHash: utils.ComputeMessageHash(event.Sender, event.Target, event.Value, event.Fee, event.Deadline, event.Message, event.MessageNonce).String(),
Height: vLog.BlockNumber,
Sender: event.Sender.String(),
Value: event.Value.String(),
@@ -261,7 +261,7 @@ func (w *Watcher) parseBridgeEventLogs(logs []types.Log) ([]*orm.L1Message, []re
Calldata: common.Bytes2Hex(event.Message),
Layer1Hash: vLog.TxHash.Hex(),
})
case common.HexToHash(bridge_abi.RELAYED_MESSAGE_EVENT_SIGNATURE):
case common.HexToHash(bridge_abi.RelayedMessageEventSignature):
event := struct {
MsgHash common.Hash
}{}
@@ -272,7 +272,7 @@ func (w *Watcher) parseBridgeEventLogs(logs []types.Log) ([]*orm.L1Message, []re
txHash: vLog.TxHash,
isSuccessful: true,
})
case common.HexToHash(bridge_abi.FAILED_RELAYED_MESSAGE_EVENT_SIGNATURE):
case common.HexToHash(bridge_abi.FailedRelayedMessageEventSignature):
event := struct {
MsgHash common.Hash
}{}
@@ -283,7 +283,7 @@ func (w *Watcher) parseBridgeEventLogs(logs []types.Log) ([]*orm.L1Message, []re
txHash: vLog.TxHash,
isSuccessful: false,
})
case common.HexToHash(bridge_abi.COMMIT_BATCH_EVENT_SIGNATURE):
case common.HexToHash(bridge_abi.CommitBatchEventSignature):
event := struct {
BatchID common.Hash
BatchHash common.Hash
@@ -303,7 +303,7 @@ func (w *Watcher) parseBridgeEventLogs(logs []types.Log) ([]*orm.L1Message, []re
txHash: vLog.TxHash,
status: orm.RollupCommitted,
})
case common.HexToHash(bridge_abi.FINALIZED_BATCH_EVENT_SIGNATURE):
case common.HexToHash(bridge_abi.FinalizedBatchEventSignature):
event := struct {
BatchID common.Hash
BatchHash common.Hash

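FetchContractEvent puts all five event signature hashes into topic position 0 of a single filter query; in go-ethereum's filter semantics the entries within one topic position are OR-ed, so one FilterLogs call covers every bridge event. A sketch of that query construction, assuming the fork keeps upstream's ethereum.FilterQuery and ethclient APIs:

```go
package example

import (
	"context"
	"fmt"
	"math/big"

	ethereum "github.com/scroll-tech/go-ethereum"
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/ethclient"
)

// fetchBridgeLogs mirrors the query built in FetchContractEvent: one contract
// address and a list of alternative topic[0] values; logs matching any of the
// signatures are returned in a single call.
func fetchBridgeLogs(ctx context.Context, client *ethclient.Client, messenger common.Address, from, to int64, sigs []string) error {
	topic0 := make([]common.Hash, 0, len(sigs))
	for _, s := range sigs {
		topic0 = append(topic0, common.HexToHash(s))
	}
	query := ethereum.FilterQuery{
		FromBlock: big.NewInt(from),
		ToBlock:   big.NewInt(to),
		Addresses: []common.Address{messenger},
		Topics:    [][]common.Hash{topic0}, // position 0: OR over the event signatures
	}
	logs, err := client.FilterLogs(ctx, query)
	if err != nil {
		return err
	}
	fmt.Println("matched logs:", len(logs))
	return nil
}
```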
View File

@@ -32,7 +32,10 @@ func New(ctx context.Context, cfg *config.L2Config, orm database.OrmFactory) (*B
return nil, err
}
l2Watcher := NewL2WatcherClient(ctx, client, cfg.Confirmations, cfg.BatchProposerConfig, cfg.L2MessengerAddress, orm)
l2Watcher, err := NewL2WatcherClient(ctx, client, cfg.Confirmations, cfg.BatchProposerConfig, cfg.L2MessengerAddress, orm)
if err != nil {
return nil, err
}
return &Backend{
cfg: cfg,

View File

@@ -40,7 +40,7 @@ func newBatchProposer(cfg *config.BatchProposerConfig, orm database.OrmFactory)
}
}
func (w *batchProposer) tryProposeBatch() error {
func (w *batchProposer) tryProposeBatch() {
w.mutex.Lock()
defer w.mutex.Unlock()
@@ -49,20 +49,27 @@ func (w *batchProposer) tryProposeBatch() error {
fmt.Sprintf("order by number ASC LIMIT %d", w.batchBlocksLimit),
)
if err != nil {
return err
log.Error("failed to get unbatched blocks", "err", err)
return
}
if len(blocks) == 0 {
return nil
return
}
if blocks[0].GasUsed > w.batchGasThreshold {
log.Warn("gas overflow even for only 1 block", "height", blocks[0].Number, "gas", blocks[0].GasUsed)
return w.createBatchForBlocks(blocks[:1])
if err = w.createBatchForBlocks(blocks[:1]); err != nil {
log.Error("failed to create batch", "number", blocks[0].Number, "err", err)
}
return
}
if blocks[0].TxNum > w.batchTxNumThreshold {
log.Warn("too many txs even for only 1 block", "height", blocks[0].Number, "tx_num", blocks[0].TxNum)
return w.createBatchForBlocks(blocks[:1])
if err = w.createBatchForBlocks(blocks[:1]); err != nil {
log.Error("failed to create batch", "number", blocks[0].Number, "err", err)
}
return
}
var (
@@ -83,10 +90,12 @@ func (w *batchProposer) tryProposeBatch() error {
// if it's not old enough we will skip proposing the batch,
// otherwise we will still propose a batch
if length == len(blocks) && blocks[0].BlockTimestamp+w.batchTimeSec > uint64(time.Now().Unix()) {
return nil
return
}
return w.createBatchForBlocks(blocks)
if err = w.createBatchForBlocks(blocks); err != nil {
log.Error("failed to create batch", "from", blocks[0].Number, "to", blocks[len(blocks)-1].Number, "err", err)
}
}
func (w *batchProposer) createBatchForBlocks(blocks []*orm.BlockInfo) error {

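tryProposeBatch now logs and returns instead of propagating errors; the selection loop that accumulates blocks up to the gas and tx-count thresholds is elided from this hunk. A hypothetical, simplified restatement of that rule (the field names GasUsed and TxNum come from the hunk; everything else is illustrative):

```go
package example

import "scroll-tech/database/orm"

// selectBlocks is a hypothetical sketch, not the repo's code: take blocks in
// order until adding the next one would push the accumulated gas or tx count
// past its threshold, and propose only the prefix that fits.
func selectBlocks(blocks []*orm.BlockInfo, gasThreshold, txNumThreshold uint64) []*orm.BlockInfo {
	var gas, txs uint64
	n := 0
	for _, b := range blocks {
		if gas+b.GasUsed > gasThreshold || txs+b.TxNum > txNumThreshold {
			break
		}
		gas += b.GasUsed
		txs += b.TxNum
		n++
	}
	return blocks[:n]
}
```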
View File

@@ -49,7 +49,7 @@ func testBatchProposer(t *testing.T) {
BatchTimeSec: 1,
BatchBlocksLimit: 100,
}, db)
assert.NoError(t, proposer.tryProposeBatch())
proposer.tryProposeBatch()
infos, err := db.GetUnbatchedBlocks(map[string]interface{}{},
fmt.Sprintf("order by number ASC LIMIT %d", 100))

View File

@@ -19,6 +19,7 @@ var (
l1gethImg docker.ImgInstance
l2gethImg docker.ImgInstance
dbImg docker.ImgInstance
redisImg docker.ImgInstance
// l2geth client
l2Cli *ethclient.Client
@@ -40,8 +41,12 @@ func setupEnv(t *testing.T) (err error) {
cfg.L2Config.Endpoint = l2gethImg.Endpoint()
// Create db container.
dbImg = docker.NewTestDBDocker(t, cfg.DBConfig.DriverName)
cfg.DBConfig.DSN = dbImg.Endpoint()
dbImg = docker.NewTestDBDocker(t, cfg.DBConfig.Persistence.DriverName)
cfg.DBConfig.Persistence.DSN = dbImg.Endpoint()
// Create redis container.
redisImg = docker.NewTestRedisDocker(t)
cfg.DBConfig.Redis.URL = redisImg.Endpoint()
// Create l2geth client.
l2Cli, err = ethclient.Dial(cfg.L2Config.Endpoint)
@@ -60,6 +65,9 @@ func free(t *testing.T) {
if l2gethImg != nil {
assert.NoError(t, l2gethImg.Stop())
}
if redisImg != nil {
assert.NoError(t, redisImg.Stop())
}
}
func TestFunction(t *testing.T) {
@@ -77,6 +85,7 @@ func TestFunction(t *testing.T) {
t.Run("TestL2RelayerProcessSaveEvents", testL2RelayerProcessSaveEvents)
t.Run("testL2RelayerProcessPendingBatches", testL2RelayerProcessPendingBatches)
t.Run("testL2RelayerProcessCommittedBatches", testL2RelayerProcessCommittedBatches)
t.Run("testL2RelayerSkipBatches", testL2RelayerSkipBatches)
t.Run("testBatchProposer", testBatchProposer)

View File

@@ -3,7 +3,10 @@ package l2
import (
"context"
"errors"
"fmt"
"math/big"
"runtime"
"sync"
"time"
// not sure if this will make problems when relay with l1geth
@@ -11,6 +14,8 @@ import (
"github.com/scroll-tech/go-ethereum/accounts/abi"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/log"
"golang.org/x/sync/errgroup"
"modernc.org/mathutil"
"scroll-tech/database"
"scroll-tech/database/orm"
@@ -41,14 +46,17 @@ type Layer2Relayer struct {
rollupCh <-chan *sender.Confirmation
l1RollupABI *abi.ABI
// a list of processing message, indexed by layer2 hash
processingMessage map[string]string
// A list of processing message.
// key(string): confirmation ID, value(string): layer2 hash.
processingMessage sync.Map
// a list of processing batch commitment, indexed by batch id
processingCommitment map[string]string
// A list of processing batch commitment.
// key(string): confirmation ID, value(string): batch id.
processingCommitment sync.Map
// a list of processing batch finalization, indexed by batch id
processingFinalization map[string]string
// A list of processing batch finalization.
// key(string): confirmation ID, value(string): batch id.
processingFinalization sync.Map
stopCh chan struct{}
}
@@ -78,23 +86,50 @@ func NewLayer2Relayer(ctx context.Context, db database.OrmFactory, cfg *config.R
rollupCh: rollupSender.ConfirmChan(),
l1RollupABI: bridge_abi.RollupMetaABI,
cfg: cfg,
processingMessage: map[string]string{},
processingCommitment: map[string]string{},
processingFinalization: map[string]string{},
processingMessage: sync.Map{},
processingCommitment: sync.Map{},
processingFinalization: sync.Map{},
stopCh: make(chan struct{}),
}, nil
}
const processMsgLimit = 100
// ProcessSavedEvents relays saved un-processed cross-domain transactions to desired blockchain
func (r *Layer2Relayer) ProcessSavedEvents() {
func (r *Layer2Relayer) ProcessSavedEvents(wg *sync.WaitGroup) {
defer wg.Done()
batch, err := r.db.GetLatestFinalizedBatch()
if err != nil {
log.Error("GetLatestFinalizedBatch failed", "err", err)
return
}
// msgs are sorted by nonce in increasing order
msgs, err := r.db.GetL2MessagesByStatus(orm.MsgPending)
msgs, err := r.db.GetL2Messages(
map[string]interface{}{"status": orm.MsgPending},
fmt.Sprintf("AND height<=%d", batch.EndBlockNumber),
fmt.Sprintf("ORDER BY nonce ASC LIMIT %d", processMsgLimit),
)
if err != nil {
log.Error("Failed to fetch unprocessed L2 messages", "err", err)
return
}
for _, msg := range msgs {
if err := r.processSavedEvent(msg); err != nil {
// process messages in batches
batchSize := mathutil.Min((runtime.GOMAXPROCS(0)+1)/2, r.messageSender.NumberOfAccounts())
for size := 0; len(msgs) > 0; msgs = msgs[size:] {
if size = len(msgs); size > batchSize {
size = batchSize
}
var g errgroup.Group
for _, msg := range msgs[:size] {
msg := msg
g.Go(func() error {
return r.processSavedEvent(msg, batch.Index)
})
}
if err := g.Wait(); err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
log.Error("failed to process l2 saved event", "err", err)
}
@@ -103,25 +138,13 @@ func (r *Layer2Relayer) ProcessSavedEvents() {
}
}
func (r *Layer2Relayer) processSavedEvent(msg *orm.L2Message) error {
// @todo add support to relay multiple messages
batch, err := r.db.GetLatestFinalizedBatch()
if err != nil {
log.Error("GetLatestFinalizedBatch failed", "err", err)
return err
}
if batch.EndBlockNumber < msg.Height {
// log.Warn("corresponding block not finalized", "status", status)
return nil
}
func (r *Layer2Relayer) processSavedEvent(msg *orm.L2Message, index uint64) error {
// @todo fetch merkle proof from l2geth
log.Info("Processing L2 Message", "msg.nonce", msg.Nonce, "msg.height", msg.Height)
proof := bridge_abi.IL1ScrollMessengerL2MessageProof{
BlockHeight: big.NewInt(int64(msg.Height)),
BatchIndex: big.NewInt(int64(batch.Index)),
BatchIndex: big.NewInt(0).SetUint64(index),
MerkleProof: make([]byte, 0),
}
from := common.HexToAddress(msg.Sender)
@@ -144,6 +167,12 @@ func (r *Layer2Relayer) processSavedEvent(msg *orm.L2Message) error {
}
hash, err := r.messageSender.SendTransaction(msg.MsgHash, &r.cfg.MessengerContractAddress, big.NewInt(0), data)
if err != nil && err.Error() == "execution reverted: Message expired" {
return r.db.UpdateLayer2Status(r.ctx, msg.MsgHash, orm.MsgExpired)
}
if err != nil && err.Error() == "execution reverted: Message successfully executed" {
return r.db.UpdateLayer2Status(r.ctx, msg.MsgHash, orm.MsgConfirmed)
}
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
log.Error("Failed to send relayMessageWithProof tx to layer1 ", "msg.height", msg.Height, "msg.MsgHash", msg.MsgHash, "err", err)
@@ -159,14 +188,15 @@ func (r *Layer2Relayer) processSavedEvent(msg *orm.L2Message) error {
log.Error("UpdateLayer2StatusAndLayer1Hash failed", "msgHash", msg.MsgHash, "err", err)
return err
}
r.processingMessage[msg.MsgHash] = msg.MsgHash
r.processingMessage.Store(msg.MsgHash, msg.MsgHash)
return nil
}
// ProcessPendingBatches submit batch data to layer 1 rollup contract
func (r *Layer2Relayer) ProcessPendingBatches() {
func (r *Layer2Relayer) ProcessPendingBatches(wg *sync.WaitGroup) {
defer wg.Done()
// batches are sorted by batch index in increasing order
batchesInDB, err := r.db.GetPendingBatches()
batchesInDB, err := r.db.GetPendingBatches(1)
if err != nil {
log.Error("Failed to fetch pending L2 batches", "err", err)
return
@@ -237,28 +267,39 @@ func (r *Layer2Relayer) ProcessPendingBatches() {
return
}
txID := id + "-commit"
// add suffix `-commit` to avoid duplication with finalize tx in unit tests
hash, err := r.rollupSender.SendTransaction(id+"-commit", &r.cfg.RollupContractAddress, big.NewInt(0), data)
hash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), data)
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
log.Error("Failed to send commitBatch tx to layer1 ", "id", id, "index", batch.Index, "err", err)
}
return
}
log.Info("commitBatch in layer1", "id", id, "index", batch.Index, "hash", hash)
log.Info("commitBatch in layer1", "batch_id", id, "index", batch.Index, "hash", hash)
// record and sync with db, @todo handle db error
err = r.db.UpdateCommitTxHashAndRollupStatus(r.ctx, id, hash.String(), orm.RollupCommitting)
if err != nil {
log.Error("UpdateCommitTxHashAndRollupStatus failed", "id", id, "index", batch.Index, "err", err)
}
r.processingCommitment[id] = id
r.processingCommitment.Store(txID, id)
}
// ProcessCommittedBatches submit proof to layer 1 rollup contract
func (r *Layer2Relayer) ProcessCommittedBatches() {
func (r *Layer2Relayer) ProcessCommittedBatches(wg *sync.WaitGroup) {
defer wg.Done()
// set skipped batches in a single db operation
if count, err := r.db.UpdateSkippedBatches(); err != nil {
log.Error("UpdateSkippedBatches failed", "err", err)
// continue anyway
} else if count > 0 {
log.Info("Skipping batches", "count", count)
}
// batches are sorted by batch index in increasing order
batches, err := r.db.GetCommittedBatches()
batches, err := r.db.GetCommittedBatches(1)
if err != nil {
log.Error("Failed to fetch committed L2 batches", "err", err)
return
@@ -286,6 +327,8 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
return
case orm.ProvingTaskFailed, orm.ProvingTaskSkipped:
// note: this is covered by UpdateSkippedBatches, but we keep it for completeness's sake
if err = r.db.UpdateRollupStatus(r.ctx, id, orm.RollupFinalizationSkipped); err != nil {
log.Warn("UpdateRollupStatus failed", "id", id, "err", err)
}
@@ -330,8 +373,9 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
return
}
txID := id + "-finalize"
// add suffix `-finalize` to avoid duplication with commit tx in unit tests
txHash, err := r.rollupSender.SendTransaction(id+"-finalize", &r.cfg.RollupContractAddress, big.NewInt(0), data)
txHash, err := r.rollupSender.SendTransaction(txID, &r.cfg.RollupContractAddress, big.NewInt(0), data)
hash := &txHash
if err != nil {
if !errors.Is(err, sender.ErrNoAvailableAccount) {
@@ -339,15 +383,15 @@ func (r *Layer2Relayer) ProcessCommittedBatches() {
}
return
}
log.Info("finalizeBatchWithProof in layer1", "id", id, "hash", hash)
log.Info("finalizeBatchWithProof in layer1", "batch_id", id, "hash", hash)
// record and sync with db, @todo handle db error
err = r.db.UpdateFinalizeTxHashAndRollupStatus(r.ctx, id, hash.String(), orm.RollupFinalizing)
if err != nil {
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "id", id, "err", err)
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_id", id, "err", err)
}
success = true
r.processingFinalization[id] = id
r.processingFinalization.Store(txID, id)
default:
log.Error("encounter unreachable case in ProcessCommittedBatches",
@@ -366,9 +410,12 @@ func (r *Layer2Relayer) Start() {
for {
select {
case <-ticker.C:
r.ProcessSavedEvents()
r.ProcessPendingBatches()
r.ProcessCommittedBatches()
var wg = sync.WaitGroup{}
wg.Add(3)
go r.ProcessSavedEvents(&wg)
go r.ProcessPendingBatches(&wg)
go r.ProcessCommittedBatches(&wg)
wg.Wait()
case confirmation := <-r.messageCh:
r.handleConfirmation(confirmation)
case confirmation := <-r.rollupCh:
@@ -393,36 +440,36 @@ func (r *Layer2Relayer) handleConfirmation(confirmation *sender.Confirmation) {
transactionType := "Unknown"
// check whether it is message relay transaction
if msgHash, ok := r.processingMessage[confirmation.ID]; ok {
if msgHash, ok := r.processingMessage.Load(confirmation.ID); ok {
transactionType = "MessageRelay"
// @todo handle db error
err := r.db.UpdateLayer2StatusAndLayer1Hash(r.ctx, msgHash, orm.MsgConfirmed, confirmation.TxHash.String())
err := r.db.UpdateLayer2StatusAndLayer1Hash(r.ctx, msgHash.(string), orm.MsgConfirmed, confirmation.TxHash.String())
if err != nil {
log.Warn("UpdateLayer2StatusAndLayer1Hash failed", "msgHash", msgHash, "err", err)
log.Warn("UpdateLayer2StatusAndLayer1Hash failed", "msgHash", msgHash.(string), "err", err)
}
delete(r.processingMessage, confirmation.ID)
r.processingMessage.Delete(confirmation.ID)
}
// check whether it is block commitment transaction
if batch_id, ok := r.processingCommitment[confirmation.ID]; ok {
if batchID, ok := r.processingCommitment.Load(confirmation.ID); ok {
transactionType = "BatchCommitment"
// @todo handle db error
err := r.db.UpdateCommitTxHashAndRollupStatus(r.ctx, batch_id, confirmation.TxHash.String(), orm.RollupCommitted)
err := r.db.UpdateCommitTxHashAndRollupStatus(r.ctx, batchID.(string), confirmation.TxHash.String(), orm.RollupCommitted)
if err != nil {
log.Warn("UpdateCommitTxHashAndRollupStatus failed", "batch_id", batch_id, "err", err)
log.Warn("UpdateCommitTxHashAndRollupStatus failed", "batch_id", batchID.(string), "err", err)
}
delete(r.processingCommitment, confirmation.ID)
r.processingCommitment.Delete(confirmation.ID)
}
// check whether it is proof finalization transaction
if batch_id, ok := r.processingFinalization[confirmation.ID]; ok {
if batchID, ok := r.processingFinalization.Load(confirmation.ID); ok {
transactionType = "ProofFinalization"
// @todo handle db error
err := r.db.UpdateFinalizeTxHashAndRollupStatus(r.ctx, batch_id, confirmation.TxHash.String(), orm.RollupFinalized)
err := r.db.UpdateFinalizeTxHashAndRollupStatus(r.ctx, batchID.(string), confirmation.TxHash.String(), orm.RollupFinalized)
if err != nil {
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_id", batch_id, "err", err)
log.Warn("UpdateFinalizeTxHashAndRollupStatus failed", "batch_id", batchID.(string), "err", err)
}
delete(r.processingFinalization, confirmation.ID)
r.processingFinalization.Delete(confirmation.ID)
}
log.Info("transaction confirmed in layer1", "type", transactionType, "confirmation", confirmation)
}
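Two patterns drive this relayer refactor: the processing maps become sync.Map because confirmations are handled on a different goroutine than submissions, and ProcessSavedEvents fans work out in chunks whose size is bounded by half the CPU count and the number of sender accounts. A self-contained sketch of that chunked errgroup pattern (the item type and process callback are placeholders):

```go
package main

import (
	"fmt"
	"runtime"

	"golang.org/x/sync/errgroup"
	"modernc.org/mathutil"
)

// processInChunks mirrors the fan-out in ProcessSavedEvents: the slice is cut
// into chunks of at most batchSize items, each chunk is processed concurrently,
// and the first error stops further chunks.
func processInChunks(items []string, numAccounts int, process func(string) error) error {
	batchSize := mathutil.Min((runtime.GOMAXPROCS(0)+1)/2, numAccounts)
	if batchSize < 1 {
		batchSize = 1 // guard for the sketch; the diff assumes at least one account
	}
	for size := 0; len(items) > 0; items = items[size:] {
		if size = len(items); size > batchSize {
			size = batchSize
		}
		var g errgroup.Group
		for _, item := range items[:size] {
			item := item // capture loop variable (pre-Go 1.22 semantics)
			g.Go(func() error { return process(item) })
		}
		if err := g.Wait(); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	err := processInChunks([]string{"msg-1", "msg-2", "msg-3", "msg-4", "msg-5"}, 2,
		func(m string) error { fmt.Println("processed", m); return nil })
	fmt.Println("done, err =", err)
}
```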

View File

@@ -5,6 +5,7 @@ import (
"encoding/json"
"math/big"
"os"
"sync"
"testing"
"time"
@@ -94,7 +95,10 @@ func testL2RelayerProcessSaveEvents(t *testing.T) {
err = db.UpdateRollupStatus(context.Background(), batchID, orm.RollupFinalized)
assert.NoError(t, err)
relayer.ProcessSavedEvents()
var wg = sync.WaitGroup{}
wg.Add(1)
relayer.ProcessSavedEvents(&wg)
wg.Wait()
msg, err := db.GetL2MessageByNonce(templateL2Message[0].Nonce)
assert.NoError(t, err)
@@ -150,7 +154,10 @@ func testL2RelayerProcessPendingBatches(t *testing.T) {
// err = db.UpdateRollupStatus(context.Background(), batchID, orm.RollupPending)
// assert.NoError(t, err)
relayer.ProcessPendingBatches()
var wg = sync.WaitGroup{}
wg.Add(1)
relayer.ProcessPendingBatches(&wg)
wg.Wait()
// Check if Rollup Result is changed successfully
status, err := db.GetRollupStatus(batchID)
@@ -187,9 +194,80 @@ func testL2RelayerProcessCommittedBatches(t *testing.T) {
err = db.UpdateProvingStatus(batchID, orm.ProvingTaskVerified)
assert.NoError(t, err)
relayer.ProcessCommittedBatches()
var wg = sync.WaitGroup{}
wg.Add(1)
relayer.ProcessCommittedBatches(&wg)
wg.Wait()
status, err := db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupFinalizing, status)
}
func testL2RelayerSkipBatches(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
l2Cfg := cfg.L2Config
relayer, err := NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer relayer.Stop()
createBatch := func(rollupStatus orm.RollupStatus, provingStatus orm.ProvingStatus) string {
dbTx, err := db.Beginx()
assert.NoError(t, err)
batchID, err := db.NewBatchInDBTx(dbTx, &orm.BlockInfo{}, &orm.BlockInfo{}, "0", 1, 194676) // startBlock & endBlock & parentHash & totalTxNum & totalL2Gas don't really matter here
assert.NoError(t, err)
err = dbTx.Commit()
assert.NoError(t, err)
err = db.UpdateRollupStatus(context.Background(), batchID, rollupStatus)
assert.NoError(t, err)
tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
err = db.UpdateProofByID(context.Background(), batchID, tProof, tInstanceCommitments, 100)
assert.NoError(t, err)
err = db.UpdateProvingStatus(batchID, provingStatus)
assert.NoError(t, err)
return batchID
}
skipped := []string{
createBatch(orm.RollupCommitted, orm.ProvingTaskSkipped),
createBatch(orm.RollupCommitted, orm.ProvingTaskFailed),
}
notSkipped := []string{
createBatch(orm.RollupPending, orm.ProvingTaskSkipped),
createBatch(orm.RollupCommitting, orm.ProvingTaskSkipped),
createBatch(orm.RollupFinalizing, orm.ProvingTaskSkipped),
createBatch(orm.RollupFinalized, orm.ProvingTaskSkipped),
createBatch(orm.RollupPending, orm.ProvingTaskFailed),
createBatch(orm.RollupCommitting, orm.ProvingTaskFailed),
createBatch(orm.RollupFinalizing, orm.ProvingTaskFailed),
createBatch(orm.RollupFinalized, orm.ProvingTaskFailed),
createBatch(orm.RollupCommitted, orm.ProvingTaskVerified),
}
var wg = sync.WaitGroup{}
wg.Add(1)
relayer.ProcessCommittedBatches(&wg)
wg.Wait()
for _, id := range skipped {
status, err := db.GetRollupStatus(id)
assert.NoError(t, err)
assert.Equal(t, orm.RollupFinalizationSkipped, status)
}
for _, id := range notSkipped {
status, err := db.GetRollupStatus(id)
assert.NoError(t, err)
assert.NotEqual(t, orm.RollupFinalizationSkipped, status)
}
}

View File

@@ -53,14 +53,14 @@ type WatcherClient struct {
}
// NewL2WatcherClient take a l2geth instance to generate a l2watcherclient instance
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations uint64, bpCfg *config.BatchProposerConfig, messengerAddress common.Address, orm database.OrmFactory) *WatcherClient {
func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmations uint64, bpCfg *config.BatchProposerConfig, messengerAddress common.Address, orm database.OrmFactory) (*WatcherClient, error) {
savedHeight, err := orm.GetLayer2LatestWatchedHeight()
if err != nil {
log.Warn("fetch height from db failed", "err", err)
savedHeight = 0
}
return &WatcherClient{
watcher := &WatcherClient{
ctx: ctx,
Client: client,
orm: orm,
@@ -72,51 +72,100 @@ func NewL2WatcherClient(ctx context.Context, client *ethclient.Client, confirmat
stopped: 0,
batchProposer: newBatchProposer(bpCfg, orm),
}
// Init cache, if traces in cache expired reset it.
if err = watcher.initCache(ctx); err != nil {
log.Error("failed to init cache in l2 watcher")
return nil, err
}
return watcher, nil
}
// Start the Listening process
func (w *WatcherClient) Start() {
go func() {
if reflect.ValueOf(w.orm).IsNil() {
panic("must run L2 watcher with DB")
panic("must run L2 watcher with Persistence")
}
ticker := time.NewTicker(3 * time.Second)
defer ticker.Stop()
ctx, cancel := context.WithCancel(w.ctx)
for ; true; <-ticker.C {
select {
case <-w.stopCh:
return
// trace fetcher loop
go func(ctx context.Context) {
ticker := time.NewTicker(3 * time.Second)
defer ticker.Stop()
default:
// get current height
number, err := w.BlockNumber(w.ctx)
if err != nil {
log.Error("failed to get_BlockNumber", "err", err)
continue
}
for {
select {
case <-ctx.Done():
return
if number >= w.confirmations {
number = number - w.confirmations
} else {
number = 0
}
case <-ticker.C:
// get current height
number, err := w.BlockNumber(ctx)
if err != nil {
log.Error("failed to get_BlockNumber", "err", err)
continue
}
if err := w.tryFetchRunningMissingBlocks(w.ctx, number); err != nil {
log.Error("failed to fetchRunningMissingBlocks", "err", err)
}
if number >= w.confirmations {
number = number - w.confirmations
} else {
number = 0
}
// @todo handle error
if err := w.fetchContractEvent(number); err != nil {
log.Error("failed to fetchContractEvent", "err", err)
}
if err := w.batchProposer.tryProposeBatch(); err != nil {
log.Error("failed to tryProposeBatch", "err", err)
w.tryFetchRunningMissingBlocks(ctx, number)
}
}
}
}(ctx)
// event fetcher loop
go func(ctx context.Context) {
ticker := time.NewTicker(3 * time.Second)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
// get current height
number, err := w.BlockNumber(ctx)
if err != nil {
log.Error("failed to get_BlockNumber", "err", err)
continue
}
if number >= w.confirmations {
number = number - w.confirmations
} else {
number = 0
}
w.FetchContractEvent(number)
}
}
}(ctx)
// batch proposer loop
go func(ctx context.Context) {
ticker := time.NewTicker(3 * time.Second)
defer ticker.Stop()
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
w.batchProposer.tryProposeBatch()
}
}
}(ctx)
<-w.stopCh
cancel()
}()
}
@@ -128,13 +177,14 @@ func (w *WatcherClient) Stop() {
const blockTracesFetchLimit = uint64(10)
// try fetch missing blocks if inconsistent
func (w *WatcherClient) tryFetchRunningMissingBlocks(ctx context.Context, blockHeight uint64) error {
// Get newest block in DB. must have blocks at that time.
func (w *WatcherClient) tryFetchRunningMissingBlocks(ctx context.Context, blockHeight uint64) {
// Get newest block in Persistence. must have blocks at that time.
// Don't use "block_trace" table "trace" column's BlockTrace.Number,
// because it might be empty if the corresponding rollup_result is finalized/finalization_skipped
heightInDB, err := w.orm.GetBlockTracesLatestHeight()
if err != nil {
return fmt.Errorf("failed to GetBlockTracesLatestHeight in DB: %v", err)
log.Error("failed to GetBlockTracesLatestHeight", "err", err)
return
}
// Can't get trace from genesis block, so the default start number is 1.
@@ -151,15 +201,14 @@ func (w *WatcherClient) tryFetchRunningMissingBlocks(ctx context.Context, blockH
}
// Get block traces and insert into db.
if err = w.insertBlockTraces(ctx, from, to); err != nil {
return err
if err = w.getAndStoreBlockTraces(ctx, from, to); err != nil {
log.Error("fail to getAndStoreBlockTraces", "from", from, "to", to, "err", err)
return
}
}
return nil
}
func (w *WatcherClient) insertBlockTraces(ctx context.Context, from, to uint64) error {
func (w *WatcherClient) getAndStoreBlockTraces(ctx context.Context, from, to uint64) error {
var traces []*types.BlockTrace
for number := from; number <= to; number++ {
@@ -171,7 +220,6 @@ func (w *WatcherClient) insertBlockTraces(ctx context.Context, from, to uint64)
log.Info("retrieved block trace", "height", trace.Header.Number, "hash", trace.Header.Hash().String())
traces = append(traces, trace)
}
if len(traces) > 0 {
if err := w.orm.InsertBlockTraces(traces); err != nil {
@@ -184,8 +232,8 @@ func (w *WatcherClient) insertBlockTraces(ctx context.Context, from, to uint64)
const contractEventsBlocksFetchLimit = int64(10)
// FetchContractEvent pull latest event logs from given contract address and save in DB
func (w *WatcherClient) fetchContractEvent(blockHeight uint64) error {
// FetchContractEvent pull latest event logs from given contract address and save in Persistence
func (w *WatcherClient) FetchContractEvent(blockHeight uint64) {
defer func() {
log.Info("l2 watcher fetchContractEvent", "w.processedMsgHeight", w.processedMsgHeight)
}()
@@ -210,14 +258,14 @@ func (w *WatcherClient) fetchContractEvent(blockHeight uint64) error {
Topics: make([][]common.Hash, 1),
}
query.Topics[0] = make([]common.Hash, 3)
query.Topics[0][0] = common.HexToHash(bridge_abi.SENT_MESSAGE_EVENT_SIGNATURE)
query.Topics[0][1] = common.HexToHash(bridge_abi.RELAYED_MESSAGE_EVENT_SIGNATURE)
query.Topics[0][2] = common.HexToHash(bridge_abi.FAILED_RELAYED_MESSAGE_EVENT_SIGNATURE)
query.Topics[0][0] = common.HexToHash(bridge_abi.SentMessageEventSignature)
query.Topics[0][1] = common.HexToHash(bridge_abi.RelayedMessageEventSignature)
query.Topics[0][2] = common.HexToHash(bridge_abi.FailedRelayedMessageEventSignature)
logs, err := w.FilterLogs(w.ctx, query)
if err != nil {
log.Error("failed to get event logs", "err", err)
return err
return
}
if len(logs) == 0 {
w.processedMsgHeight = uint64(to)
@@ -228,7 +276,7 @@ func (w *WatcherClient) fetchContractEvent(blockHeight uint64) error {
sentMessageEvents, relayedMessageEvents, err := w.parseBridgeEventLogs(logs)
if err != nil {
log.Error("failed to parse emitted event log", "err", err)
return err
return
}
// Update relayed message first to make sure we don't forget to update submited message.
@@ -243,18 +291,17 @@ func (w *WatcherClient) fetchContractEvent(blockHeight uint64) error {
}
if err != nil {
log.Error("Failed to update layer1 status and layer2 hash", "err", err)
return err
return
}
}
if err = w.orm.SaveL2Messages(w.ctx, sentMessageEvents); err != nil {
return err
log.Error("failed to save l2 messages", "err", err)
return
}
w.processedMsgHeight = uint64(to)
}
return nil
}
func (w *WatcherClient) parseBridgeEventLogs(logs []types.Log) ([]*orm.L2Message, []relayedMessage, error) {
@@ -265,7 +312,7 @@ func (w *WatcherClient) parseBridgeEventLogs(logs []types.Log) ([]*orm.L2Message
var relayedMessages []relayedMessage
for _, vLog := range logs {
switch vLog.Topics[0] {
case common.HexToHash(bridge_abi.SENT_MESSAGE_EVENT_SIGNATURE):
case common.HexToHash(bridge_abi.SentMessageEventSignature):
event := struct {
Target common.Address
Sender common.Address
@@ -286,7 +333,7 @@ func (w *WatcherClient) parseBridgeEventLogs(logs []types.Log) ([]*orm.L2Message
event.Target = common.HexToAddress(vLog.Topics[1].String())
l2Messages = append(l2Messages, &orm.L2Message{
Nonce: event.MessageNonce.Uint64(),
MsgHash: utils.ComputeMessageHash(event.Target, event.Sender, event.Value, event.Fee, event.Deadline, event.Message, event.MessageNonce).String(),
MsgHash: utils.ComputeMessageHash(event.Sender, event.Target, event.Value, event.Fee, event.Deadline, event.Message, event.MessageNonce).String(),
Height: vLog.BlockNumber,
Sender: event.Sender.String(),
Value: event.Value.String(),
@@ -297,7 +344,7 @@ func (w *WatcherClient) parseBridgeEventLogs(logs []types.Log) ([]*orm.L2Message
Calldata: common.Bytes2Hex(event.Message),
Layer2Hash: vLog.TxHash.Hex(),
})
case common.HexToHash(bridge_abi.RELAYED_MESSAGE_EVENT_SIGNATURE):
case common.HexToHash(bridge_abi.RelayedMessageEventSignature):
event := struct {
MsgHash common.Hash
}{}
@@ -308,7 +355,7 @@ func (w *WatcherClient) parseBridgeEventLogs(logs []types.Log) ([]*orm.L2Message
txHash: vLog.TxHash,
isSuccessful: true,
})
case common.HexToHash(bridge_abi.FAILED_RELAYED_MESSAGE_EVENT_SIGNATURE):
case common.HexToHash(bridge_abi.FailedRelayedMessageEventSignature):
event := struct {
MsgHash common.Hash
}{}

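The Start refactor (from "execute watcher loops independently", #253) replaces one polling loop with three goroutines, a trace fetcher, an event fetcher, and a batch proposer, each on its own 3-second ticker and all cancelled through a single context when stopCh fires, so a slow loop no longer stalls the others. A stripped-down, runnable sketch of that shape (the worker functions are placeholders):

```go
package main

import (
	"context"
	"fmt"
	"time"
)

// startLoops launches independent periodic workers sharing one cancellation
// signal, mirroring the structure of WatcherClient.Start above: each worker
// has its own ticker, and closing stopCh cancels all of them together.
func startLoops(stopCh <-chan struct{}, workers ...func(context.Context)) {
	ctx, cancel := context.WithCancel(context.Background())
	for _, work := range workers {
		work := work // capture loop variable (pre-Go 1.22 semantics)
		go func() {
			ticker := time.NewTicker(3 * time.Second)
			defer ticker.Stop()
			for {
				select {
				case <-ctx.Done():
					return
				case <-ticker.C:
					work(ctx) // a slow worker only delays its own loop
				}
			}
		}()
	}
	<-stopCh
	cancel()
}

func main() {
	stop := make(chan struct{})
	done := make(chan struct{})
	go func() {
		startLoops(stop,
			func(ctx context.Context) { fmt.Println("fetch traces") },
			func(ctx context.Context) { fmt.Println("fetch bridge events") },
			func(ctx context.Context) { fmt.Println("try to propose a batch") },
		)
		close(done)
	}()
	time.Sleep(7 * time.Second)
	close(stop)
	<-done
}
```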
View File

@@ -1,5 +1,21 @@
package l2
import (
"context"
"github.com/scroll-tech/go-ethereum/core/types"
)
// WatcherAPI watcher api service
type WatcherAPI interface {
GetTracesByBatchIndex(ctx context.Context, index uint64) ([]*types.BlockTrace, error)
}
// GetTracesByBatchIndex gets block traces for the batch at the given index.
func (w *WatcherClient) GetTracesByBatchIndex(ctx context.Context, index uint64) ([]*types.BlockTrace, error) {
id, err := w.orm.GetBatchIDByIndex(index)
if err != nil {
return nil, err
}
return w.orm.GetBlockTraces(map[string]interface{}{"batch_id": id})
}
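A small usage sketch for the new trace API follows; `printBatchTraces` is illustrative only and not part of the change, and error handling is kept minimal.

```go
package l2

import (
	"context"

	"github.com/scroll-tech/go-ethereum/log"
)

// printBatchTraces resolves a batch by index through WatcherAPI and logs the
// block traces it returns.
func printBatchTraces(ctx context.Context, api WatcherAPI, index uint64) error {
	traces, err := api.GetTracesByBatchIndex(ctx, index)
	if err != nil {
		return err
	}
	for _, trace := range traces {
		log.Info("fetched block trace", "batchIndex", index, "blockNumber", trace.Header.Number)
	}
	return nil
}
```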

bridge/l2/watcher_init.go

@@ -0,0 +1,121 @@
package l2
import (
"context"
"fmt"
"math/big"
"runtime"
"github.com/scroll-tech/go-ethereum/log"
"golang.org/x/sync/errgroup"
"scroll-tech/database/cache"
"scroll-tech/database/orm"
)
func (w *WatcherClient) initCache(ctx context.Context) error {
var (
// Use at most half of the system threads.
parallel = (runtime.GOMAXPROCS(0) + 1) / 2
db = w.orm
)
// Fill block traces of unassigned batches.
for {
batches, err := db.GetBlockBatches(
map[string]interface{}{"proving_status": orm.ProvingTaskUnassigned},
fmt.Sprintf("ORDER BY index ASC LIMIT %d;", parallel),
)
if err != nil {
log.Error("failed to get block batch", "err", err)
return err
}
if len(batches) == 0 {
break
}
var eg errgroup.Group
for _, batch := range batches {
batch := batch
eg.Go(func() error {
return w.fillTraceByNumber(ctx, batch.StartBlockNumber, batch.EndBlockNumber)
})
}
if err = eg.Wait(); err != nil {
return err
}
}
// Fill block traces of assigned and in-proving batches into the cache.
ids, err := w.orm.GetAssignedBatchIDs()
if err != nil {
return err
}
for _, id := range ids {
err = w.fillTraceByID(ctx, id)
if err != nil {
log.Error("failed to fill traces by id", "id", id, "err", err)
return err
}
}
// Fill pending block traces into cache.
for {
ids, err = w.orm.GetPendingBatches(uint64(parallel))
if err != nil {
log.Error("failed to get pending batch ids", "err", err)
return err
}
if len(ids) == 0 {
log.Info("L2 WatcherClient initCache done")
return nil
}
for _, id := range ids {
err = w.fillTraceByID(ctx, id)
if err != nil {
log.Error("failed to fill traces by id", "id", id, "err", err)
return err
}
}
}
}
// fillTraceByID fills block traces into the cache by batch id.
func (w *WatcherClient) fillTraceByID(ctx context.Context, id string) error {
batches, err := w.orm.GetBlockBatches(map[string]interface{}{"id": id})
if err != nil || len(batches) == 0 {
return err
}
batch := batches[0]
err = w.fillTraceByNumber(ctx, batch.StartBlockNumber, batch.EndBlockNumber)
if err != nil {
return err
}
return nil
}
func (w *WatcherClient) fillTraceByNumber(ctx context.Context, start, end uint64) error {
var (
rdb = w.orm.(cache.Cache)
client = w.Client
)
for height := start; height <= end; height++ {
number := big.NewInt(0).SetUint64(height)
exist, err := rdb.ExistTrace(ctx, number)
if err != nil {
return err
}
if exist {
continue
}
trace, err := client.GetBlockTraceByNumber(ctx, number)
if err != nil {
return err
}
err = rdb.SetBlockTrace(ctx, trace)
if err != nil {
return err
}
}
return nil
}
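The unassigned-batch pass above pages through the table `parallel` rows at a time and fans each page out under an errgroup, capping concurrency at roughly half the available threads. The same pattern in isolation, as a small sketch (`fetchOne` is a stand-in for `fillTraceByNumber`; batch ids are made up):

```go
package main

import (
	"context"
	"fmt"
	"runtime"

	"golang.org/x/sync/errgroup"
)

// fetchOne is a stand-in for the per-batch work (e.g. fillTraceByNumber).
func fetchOne(ctx context.Context, id int) error {
	fmt.Println("filling traces for batch", id)
	return nil
}

func main() {
	ctx := context.Background()
	parallel := (runtime.GOMAXPROCS(0) + 1) / 2
	ids := []int{1, 2, 3, 4, 5, 6, 7, 8}

	// Process the work in pages of size `parallel`; each page runs concurrently,
	// and eg.Wait collects the first error before the next page starts.
	for start := 0; start < len(ids); start += parallel {
		end := start + parallel
		if end > len(ids) {
			end = len(ids)
		}
		var eg errgroup.Group
		for _, id := range ids[start:end] {
			id := id // capture loop variable (pre-Go 1.22 semantics)
			eg.Go(func() error { return fetchOne(ctx, id) })
		}
		if err := eg.Wait(); err != nil {
			fmt.Println("page failed:", err)
			return
		}
	}
}
```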


@@ -31,7 +31,8 @@ func testCreateNewWatcherAndStop(t *testing.T) {
defer l2db.Close()
l2cfg := cfg.L2Config
rc := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.BatchProposerConfig, l2cfg.L2MessengerAddress, l2db)
rc, err := NewL2WatcherClient(context.Background(), l2Cli, l2cfg.Confirmations, l2cfg.BatchProposerConfig, l2cfg.L2MessengerAddress, l2db)
assert.NoError(t, err)
rc.Start()
defer rc.Stop()
@@ -72,7 +73,8 @@ func testMonitorBridgeContract(t *testing.T) {
address, err := bind.WaitDeployed(context.Background(), l2Cli, tx)
assert.NoError(t, err)
rc := prepareRelayerClient(l2Cli, cfg.L2Config.BatchProposerConfig, db, address)
rc, err := prepareRelayerClient(l2Cli, cfg.L2Config.BatchProposerConfig, db, address)
assert.NoError(t, err)
rc.Start()
defer rc.Stop()
@@ -110,9 +112,9 @@ func testMonitorBridgeContract(t *testing.T) {
// check if we successfully stored events
height, err := db.GetLayer2LatestWatchedHeight()
assert.NoError(t, err)
t.Log("Height in DB is", height)
t.Log("Height in Persistence is", height)
assert.Greater(t, height, int64(previousHeight))
msgs, err := db.GetL2MessagesByStatus(orm.MsgPending)
msgs, err := db.GetL2Messages(map[string]interface{}{"status": orm.MsgPending})
assert.NoError(t, err)
assert.Equal(t, 2, len(msgs))
}
@@ -134,7 +136,8 @@ func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
address, err := bind.WaitDeployed(context.Background(), l2Cli, trx)
assert.NoError(t, err)
rc := prepareRelayerClient(l2Cli, cfg.L2Config.BatchProposerConfig, db, address)
rc, err := prepareRelayerClient(l2Cli, cfg.L2Config.BatchProposerConfig, db, address)
assert.NoError(t, err)
rc.Start()
defer rc.Stop()
@@ -184,12 +187,12 @@ func testFetchMultipleSentMessageInOneBlock(t *testing.T) {
assert.NoError(t, err)
t.Log("LatestHeight is", height)
assert.Greater(t, height, int64(previousHeight)) // height must be greater than previousHeight because confirmations is 0
msgs, err := db.GetL2MessagesByStatus(orm.MsgPending)
msgs, err := db.GetL2Messages(map[string]interface{}{"status": orm.MsgPending})
assert.NoError(t, err)
assert.Equal(t, 5, len(msgs))
}
func prepareRelayerClient(l2Cli *ethclient.Client, bpCfg *config.BatchProposerConfig, db database.OrmFactory, contractAddr common.Address) *WatcherClient {
func prepareRelayerClient(l2Cli *ethclient.Client, bpCfg *config.BatchProposerConfig, db database.OrmFactory, contractAddr common.Address) (*WatcherClient, error) {
return NewL2WatcherClient(context.Background(), l2Cli, 0, bpCfg, contractAddr, db)
}


@@ -22,7 +22,7 @@ import (
"scroll-tech/bridge/sender"
)
const TX_BATCH = 50
const TXBatch = 50
var (
privateKeys []*ecdsa.PrivateKey
@@ -84,7 +84,7 @@ func testBatchSender(t *testing.T, batchSize int) {
for idx := 0; idx < newSender.NumberOfAccounts(); idx++ {
index := idx
eg.Go(func() error {
for i := 0; i < TX_BATCH; i++ {
for i := 0; i < TXBatch; i++ {
toAddr := common.HexToAddress("0x4592d8f8d7b001e72cb26a73e4fa1806a51ac79d")
id := strconv.Itoa(i + index*1000)
_, err := newSender.SendTransaction(id, &toAddr, big.NewInt(1), nil)
@@ -103,7 +103,7 @@ func testBatchSender(t *testing.T, batchSize int) {
if err := eg.Wait(); err != nil {
t.Error(err)
}
t.Logf("successful send batch txs, batch size: %d, total count: %d", newSender.NumberOfAccounts(), TX_BATCH*newSender.NumberOfAccounts())
t.Logf("successful send batch txs, batch size: %d, total count: %d", newSender.NumberOfAccounts(), TXBatch*newSender.NumberOfAccounts())
// avoid the 10-minute test timeout causing the testcase to panic
after := time.After(80 * time.Second)


@@ -4,18 +4,19 @@ import (
"context"
"crypto/ecdsa"
"math/big"
"scroll-tech/common/docker"
"testing"
"scroll-tech/bridge/config"
"scroll-tech/bridge/mock_bridge"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/stretchr/testify/assert"
"scroll-tech/bridge/config"
"scroll-tech/bridge/mock_bridge"
"scroll-tech/common/docker"
)
var (
@@ -29,6 +30,7 @@ var (
l1gethImg docker.ImgInstance
l2gethImg docker.ImgInstance
dbImg docker.ImgInstance
redisImg docker.ImgInstance
// clients
l1Client *ethclient.Client
@@ -55,16 +57,20 @@ func setupEnv(t *testing.T) {
var err error
privateKey, err = crypto.ToECDSA(common.FromHex("1212121212121212121212121212121212121212121212121212121212121212"))
assert.NoError(t, err)
messagePrivateKey, err := crypto.ToECDSA(common.FromHex("1212121212121212121212121212121212121212121212121212121212121213"))
assert.NoError(t, err)
rollupPrivateKey, err := crypto.ToECDSA(common.FromHex("1212121212121212121212121212121212121212121212121212121212121214"))
assert.NoError(t, err)
// Load config.
cfg, err = config.NewConfig("../config.json")
assert.NoError(t, err)
cfg.L1Config.Confirmations = 0
cfg.L1Config.RelayerConfig.MessageSenderPrivateKeys = []*ecdsa.PrivateKey{privateKey}
cfg.L1Config.RelayerConfig.RollupSenderPrivateKeys = []*ecdsa.PrivateKey{privateKey}
cfg.L1Config.RelayerConfig.MessageSenderPrivateKeys = []*ecdsa.PrivateKey{messagePrivateKey}
cfg.L1Config.RelayerConfig.RollupSenderPrivateKeys = []*ecdsa.PrivateKey{rollupPrivateKey}
cfg.L2Config.Confirmations = 0
cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys = []*ecdsa.PrivateKey{privateKey}
cfg.L2Config.RelayerConfig.RollupSenderPrivateKeys = []*ecdsa.PrivateKey{privateKey}
cfg.L2Config.RelayerConfig.MessageSenderPrivateKeys = []*ecdsa.PrivateKey{messagePrivateKey}
cfg.L2Config.RelayerConfig.RollupSenderPrivateKeys = []*ecdsa.PrivateKey{rollupPrivateKey}
// Create l1geth container.
l1gethImg = docker.NewTestL1Docker(t)
@@ -77,8 +83,11 @@ func setupEnv(t *testing.T) {
cfg.L2Config.Endpoint = l2gethImg.Endpoint()
// Create db container.
dbImg = docker.NewTestDBDocker(t, cfg.DBConfig.DriverName)
cfg.DBConfig.DSN = dbImg.Endpoint()
dbImg = docker.NewTestDBDocker(t, cfg.DBConfig.Persistence.DriverName)
cfg.DBConfig.Persistence.DSN = dbImg.Endpoint()
redisImg = docker.NewTestRedisDocker(t)
cfg.DBConfig.Redis.URL = redisImg.Endpoint()
// Create l1geth and l2geth client.
l1Client, err = ethclient.Dial(cfg.L1Config.Endpoint)
@@ -89,6 +98,47 @@ func setupEnv(t *testing.T) {
// Create l1 and l2 auth
l1Auth = prepareAuth(t, l1Client, privateKey)
l2Auth = prepareAuth(t, l2Client, privateKey)
// send some balance to message and rollup sender
transferEther(t, l1Auth, l1Client, messagePrivateKey)
transferEther(t, l1Auth, l1Client, rollupPrivateKey)
transferEther(t, l2Auth, l2Client, messagePrivateKey)
transferEther(t, l2Auth, l2Client, rollupPrivateKey)
}
func transferEther(t *testing.T, auth *bind.TransactOpts, client *ethclient.Client, privateKey *ecdsa.PrivateKey) {
targetAddress := crypto.PubkeyToAddress(privateKey.PublicKey)
gasPrice, err := client.SuggestGasPrice(context.Background())
assert.NoError(t, err)
gasPrice.Mul(gasPrice, big.NewInt(2))
// Get pending nonce
nonce, err := client.PendingNonceAt(context.Background(), auth.From)
assert.NoError(t, err)
// 200 ether should be enough
value, ok := big.NewInt(0).SetString("0xad78ebc5ac6200000", 0)
assert.Equal(t, ok, true)
tx := types.NewTx(&types.LegacyTx{
Nonce: nonce,
To: &targetAddress,
Value: value,
Gas: 500000,
GasPrice: gasPrice,
})
signedTx, err := auth.Signer(auth.From, tx)
assert.NoError(t, err)
err = client.SendTransaction(context.Background(), signedTx)
assert.NoError(t, err)
receipt, err := bind.WaitMined(context.Background(), client, signedTx)
assert.NoError(t, err)
if receipt.Status != types.ReceiptStatusSuccessful {
t.Fatalf("Call failed")
}
}
func free(t *testing.T) {
@@ -101,6 +151,9 @@ func free(t *testing.T) {
if l2gethImg != nil {
assert.NoError(t, l2gethImg.Stop())
}
if redisImg != nil {
assert.NoError(t, redisImg.Stop())
}
}
func prepareContracts(t *testing.T) {
@@ -150,6 +203,9 @@ func TestFunction(t *testing.T) {
// l1 rollup and watch rollup events
t.Run("TestCommitBatchAndFinalizeBatch", testCommitBatchAndFinalizeBatch)
// l2 message
t.Run("testRelayL2MessageSucceed", testRelayL2MessageSucceed)
t.Cleanup(func() {
free(t)
})


@@ -0,0 +1,177 @@
package tests
import (
"context"
"math/big"
"sync"
"testing"
"scroll-tech/database"
"scroll-tech/database/migrate"
"scroll-tech/database/orm"
"scroll-tech/bridge/l1"
"scroll-tech/bridge/l2"
"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert"
)
func testRelayL2MessageSucceed(t *testing.T) {
// Create db handler and reset db.
db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(db.GetDB().DB))
defer db.Close()
var wg sync.WaitGroup
wg.Add(3)
prepareContracts(t)
// Create L2Relayer
l2Cfg := cfg.L2Config
l2Relayer, err := l2.NewLayer2Relayer(context.Background(), db, l2Cfg.RelayerConfig)
assert.NoError(t, err)
defer l2Relayer.Stop()
// Create L2Watcher
l2Watcher, err := l2.NewL2WatcherClient(context.Background(), l2Client, 0, l2Cfg.BatchProposerConfig, l2Cfg.L2MessengerAddress, db)
assert.NoError(t, err)
// Create L1Watcher
l1Cfg := cfg.L1Config
l1Watcher := l1.NewWatcher(context.Background(), l1Client, 0, 0, l1Cfg.L1MessengerAddress, l1Cfg.RollupContractAddress, db)
// send message through l2 messenger contract
nonce, err := l2MessengerInstance.MessageNonce(&bind.CallOpts{})
assert.NoError(t, err)
sendTx, err := l2MessengerInstance.SendMessage(l2Auth, l1Auth.From, big.NewInt(0), common.Hex2Bytes("00112233"), big.NewInt(0))
assert.NoError(t, err)
sendReceipt, err := bind.WaitMined(context.Background(), l2Client, sendTx)
assert.NoError(t, err)
if sendReceipt.Status != types.ReceiptStatusSuccessful || err != nil {
t.Fatalf("Call failed")
}
// l2 watcher processes the emitted events
l2Watcher.FetchContractEvent(sendReceipt.BlockNumber.Uint64())
// check db status
msg, err := db.GetL2MessageByNonce(nonce.Uint64())
assert.NoError(t, err)
assert.Equal(t, msg.Status, orm.MsgPending)
assert.Equal(t, msg.Sender, l2Auth.From.String())
assert.Equal(t, msg.Target, l1Auth.From.String())
// add fake blocks
traces := []*types.BlockTrace{
{
Header: &types.Header{
Number: sendReceipt.BlockNumber,
ParentHash: common.Hash{},
Difficulty: big.NewInt(0),
BaseFee: big.NewInt(0),
},
StorageTrace: &types.StorageTrace{},
},
}
err = db.InsertBlockTraces(traces)
assert.NoError(t, err)
// add fake batch
dbTx, err := db.Beginx()
assert.NoError(t, err)
batchID, err := db.NewBatchInDBTx(dbTx,
&orm.BlockInfo{
Number: traces[0].Header.Number.Uint64(),
Hash: traces[0].Header.Hash().String(),
ParentHash: traces[0].Header.ParentHash.String(),
},
&orm.BlockInfo{
Number: traces[0].Header.Number.Uint64(),
Hash: traces[0].Header.Hash().String(),
ParentHash: traces[0].Header.ParentHash.String(),
},
traces[0].Header.ParentHash.String(), 1, 194676)
assert.NoError(t, err)
err = db.SetBatchIDForBlocksInDBTx(dbTx, []uint64{
traces[0].Header.Number.Uint64(),
traces[0].Header.Number.Uint64()}, batchID)
assert.NoError(t, err)
err = dbTx.Commit()
assert.NoError(t, err)
// add dummy proof
tProof := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
tInstanceCommitments := []byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}
err = db.UpdateProofByID(context.Background(), batchID, tProof, tInstanceCommitments, 100)
assert.NoError(t, err)
err = db.UpdateProvingStatus(batchID, orm.ProvingTaskVerified)
assert.NoError(t, err)
// process pending batch and check status
l2Relayer.ProcessPendingBatches(&wg)
status, err := db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupCommitting, status)
commitTxHash, err := db.GetCommitTxHash(batchID)
assert.NoError(t, err)
assert.Equal(t, true, commitTxHash.Valid)
commitTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(commitTxHash.String))
assert.NoError(t, err)
commitTxReceipt, err := bind.WaitMined(context.Background(), l1Client, commitTx)
assert.NoError(t, err)
assert.Equal(t, len(commitTxReceipt.Logs), 1)
// fetch CommitBatch rollup events
err = l1Watcher.FetchContractEvent(commitTxReceipt.BlockNumber.Uint64())
assert.NoError(t, err)
status, err = db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupCommitted, status)
// process committed batch and check status
l2Relayer.ProcessCommittedBatches(&wg)
status, err = db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupFinalizing, status)
finalizeTxHash, err := db.GetFinalizeTxHash(batchID)
assert.NoError(t, err)
assert.Equal(t, true, finalizeTxHash.Valid)
finalizeTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(finalizeTxHash.String))
assert.NoError(t, err)
finalizeTxReceipt, err := bind.WaitMined(context.Background(), l1Client, finalizeTx)
assert.NoError(t, err)
assert.Equal(t, len(finalizeTxReceipt.Logs), 1)
// fetch FinalizeBatch events
err = l1Watcher.FetchContractEvent(finalizeTxReceipt.BlockNumber.Uint64())
assert.NoError(t, err)
status, err = db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupFinalized, status)
// process l2 messages
l2Relayer.ProcessSavedEvents(&wg)
msg, err = db.GetL2MessageByNonce(nonce.Uint64())
assert.NoError(t, err)
assert.Equal(t, msg.Status, orm.MsgSubmitted)
relayTxHash, err := db.GetRelayL2MessageTxHash(nonce.Uint64())
assert.NoError(t, err)
assert.Equal(t, true, relayTxHash.Valid)
relayTx, _, err := l1Client.TransactionByHash(context.Background(), common.HexToHash(relayTxHash.String))
assert.NoError(t, err)
relayTxReceipt, err := bind.WaitMined(context.Background(), l1Client, relayTx)
assert.NoError(t, err)
assert.Equal(t, len(relayTxReceipt.Logs), 1)
// fetch message relayed events
err = l1Watcher.FetchContractEvent(relayTxReceipt.BlockNumber.Uint64())
assert.NoError(t, err)
msg, err = db.GetL2MessageByNonce(nonce.Uint64())
assert.NoError(t, err)
assert.Equal(t, msg.Status, orm.MsgConfirmed)
}


@@ -3,10 +3,12 @@ package tests
import (
"context"
"math/big"
"sync"
"testing"
"scroll-tech/database"
"scroll-tech/database/migrate"
"scroll-tech/database/orm"
"testing"
"scroll-tech/bridge/l1"
"scroll-tech/bridge/l2"
@@ -78,8 +80,12 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
err = dbTx.Commit()
assert.NoError(t, err)
var wg = sync.WaitGroup{}
wg.Add(1)
// process pending batch and check status
l2Relayer.ProcessPendingBatches()
l2Relayer.ProcessPendingBatches(&wg)
wg.Wait()
status, err := db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupCommitting, status)
@@ -93,7 +99,6 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
assert.Equal(t, len(commitTxReceipt.Logs), 1)
// fetch rollup events
assert.NoError(t, err)
err = l1Watcher.FetchContractEvent(commitTxReceipt.BlockNumber.Uint64())
assert.NoError(t, err)
status, err = db.GetRollupStatus(batchID)
@@ -108,8 +113,11 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
err = db.UpdateProvingStatus(batchID, orm.ProvingTaskVerified)
assert.NoError(t, err)
wg.Add(1)
// process committed batch and check status
l2Relayer.ProcessCommittedBatches()
l2Relayer.ProcessCommittedBatches(&wg)
wg.Wait()
status, err = db.GetRollupStatus(batchID)
assert.NoError(t, err)
assert.Equal(t, orm.RollupFinalizing, status)
@@ -123,7 +131,6 @@ func testCommitBatchAndFinalizeBatch(t *testing.T) {
assert.Equal(t, len(finalizeTxReceipt.Logs), 1)
// fetch rollup events
assert.NoError(t, err)
err = l1Watcher.FetchContractEvent(finalizeTxReceipt.BlockNumber.Uint64())
assert.NoError(t, err)
status, err = db.GetRollupStatus(batchID)


@@ -20,8 +20,8 @@ func encodePacked(input ...[]byte) []byte {
// ComputeMessageHash computes the message hash
func ComputeMessageHash(
target common.Address,
sender common.Address,
target common.Address,
value *big.Int,
fee *big.Int,
deadline *big.Int,
@@ -29,8 +29,8 @@ func ComputeMessageHash(
messageNonce *big.Int,
) common.Hash {
packed := encodePacked(
target.Bytes(),
sender.Bytes(),
target.Bytes(),
math.U256Bytes(value),
math.U256Bytes(fee),
math.U256Bytes(deadline),


@@ -7,6 +7,7 @@ import (
"scroll-tech/bridge/utils"
"github.com/scroll-tech/go-ethereum/common"
"github.com/stretchr/testify/assert"
)
func TestKeccak2(t *testing.T) {
@@ -28,15 +29,13 @@ func TestKeccak2(t *testing.T) {
func TestComputeMessageHash(t *testing.T) {
hash := utils.ComputeMessageHash(
common.HexToAddress("0xdafea492d9c6733ae3d56b7ed1adb60692c98bc5"),
common.HexToAddress("0xeafea492d9c6733ae3d56b7ed1adb60692c98bf7"),
big.NewInt(1),
big.NewInt(2),
big.NewInt(1234567),
common.Hex2Bytes("0011223344"),
big.NewInt(3),
common.HexToAddress("0xd7227113b92e537aeda220d5a2f201b836e5879d"),
common.HexToAddress("0x47c02b023b6787ef4e503df42bbb1a94f451a1c0"),
big.NewInt(5000000000000000),
big.NewInt(0),
big.NewInt(1674204924),
common.Hex2Bytes("8eaac8a30000000000000000000000007138b17fc82d7e954b3bd2f98d8166d03e5e569b0000000000000000000000007138b17fc82d7e954b3bd2f98d8166d03e5e569b0000000000000000000000000000000000000000000000000011c37937e0800000000000000000000000000000000000000000000000000000000000000000800000000000000000000000000000000000000000000000000000000000000000"),
big.NewInt(30706),
)
if hash != common.HexToHash("0x58c9a5abfd2a558bb6a6fd5192b36fe9325d98763bafd3a51a1ea28a5d0b990b") {
t.Fatalf("Invalid ComputeMessageHash, want %s, got %s", "0x58c9a5abfd2a558bb6a6fd5192b36fe9325d98763bafd3a51a1ea28a5d0b990b", hash.Hex())
}
assert.Equal(t, hash.String(), "0x920e59f62ca89a0f481d44961c55d299dd20c575693692d61fdf3ca579d8edf3")
}


@@ -179,6 +179,13 @@ linters:
- depguard
- gocyclo
- unparam
- exportloopref
- sqlclosecheck
- rowserrcheck
- durationcheck
- bidichk
- typecheck
- unused
enable-all: false
disable:
@@ -200,16 +207,7 @@ issues:
# Exclude some linters from running on tests files.
- path: _test\.go
linters:
- gocyclo
- errcheck
- dupl
- gosec
# Exclude known linters from partially hard-vendored code,
# which is impossible to exclude via "nolint" comments.
- path: internal/hmac/
text: "weak cryptographic primitive"
linters:
- gosec
# Exclude some staticcheck messages
@@ -217,18 +215,6 @@ issues:
- staticcheck
text: "SA9003:"
- linters:
- golint
text: "package comment should be of the form"
- linters:
- golint
text: "don't use ALL_CAPS in Go names;"
- linters:
- golint
text: "don't use underscores in Go names;"
# Exclude lll issues for long lines with go:generate
- linters:
- lll


@@ -0,0 +1,63 @@
imagePrefix = 'scrolltech'
credentialDocker = 'dockerhub'
pipeline {
agent any
options {
timeout (20)
}
tools {
go 'go-1.18'
nodejs "nodejs"
}
environment {
GO111MODULE = 'on'
PATH="/home/ubuntu/.cargo/bin:$PATH"
// LOG_DOCKER = 'true'
}
stages {
stage('Tag') {
steps {
script {
TAGNAME = sh(returnStdout: true, script: 'git tag -l --points-at HEAD')
sh "echo ${TAGNAME}"
// ...
}
}
}
stage('Build') {
environment {
// Extract the username and password of our credentials into "DOCKER_CREDENTIALS_USR" and "DOCKER_CREDENTIALS_PSW".
// (NOTE 1: DOCKER_CREDENTIALS will be set to "your_username:your_password".)
// The new variables will always be YOUR_VARIABLE_NAME + _USR and _PSW.
// (NOTE 2: You can't print credentials in the pipeline for security reasons.)
DOCKER_CREDENTIALS = credentials('dockerhub')
}
steps {
withCredentials([usernamePassword(credentialsId: "${credentialDocker}", passwordVariable: 'dockerPassword', usernameVariable: 'dockerUser')]) {
// Use a scripted pipeline.
script {
stage('Push image') {
if (TAGNAME == ""){
return;
}
sh "docker login --username=${dockerUser} --password=${dockerPassword}"
sh "make -C bridge docker"
sh "make -C coordinator docker"
sh "docker tag scrolltech/bridge:latest scrolltech/bridge:${TAGNAME}"
sh "docker tag scrolltech/coordinator:latest scrolltech/coordinator:${TAGNAME}"
sh "docker push scrolltech/bridge:${TAGNAME}"
sh "docker push scrolltech/coordinator:${TAGNAME}"
}
}
}
}
}
}
post {
always {
cleanWs()
slackSend(message: "${JOB_BASE_NAME} ${GIT_COMMIT} #${TAGNAME} Tag build ${currentBuild.result}")
}
}
}


@@ -1,17 +1,13 @@
package cmd
import (
"fmt"
"os"
"os/exec"
"strings"
"sync"
"testing"
"time"
"github.com/docker/docker/pkg/reexec"
cmap "github.com/orcaman/concurrent-map"
"github.com/stretchr/testify/assert"
)
var verbose bool
@@ -51,48 +47,6 @@ func NewCmd(t *testing.T, name string, args ...string) *Cmd {
}
}
// RunApp exec's the current binary using name as argv[0] which will trigger the
// reexec init function for that name (e.g. "geth-test" in cmd/geth/run_test.go)
func (t *Cmd) RunApp(parallel bool) {
t.Log("cmd: ", append([]string{t.name}, t.args...))
cmd := &exec.Cmd{
Path: reexec.Self(),
Args: append([]string{t.name}, t.args...),
Stderr: t,
Stdout: t,
}
if parallel {
go func() {
_ = cmd.Run()
}()
} else {
_ = cmd.Run()
}
t.mu.Lock()
t.cmd = cmd
t.mu.Unlock()
}
// WaitExit wait util process exit.
func (t *Cmd) WaitExit() {
// Wait all the check funcs are finished or test status is failed.
for !(t.Failed() || t.checkFuncs.IsEmpty()) {
<-time.After(time.Millisecond * 500)
}
// Send interrupt signal.
t.mu.Lock()
_ = t.cmd.Process.Signal(os.Interrupt)
t.mu.Unlock()
}
// Interrupt send interrupt signal.
func (t *Cmd) Interrupt() {
t.mu.Lock()
t.Err = t.cmd.Process.Signal(os.Interrupt)
t.mu.Unlock()
}
// RegistFunc register check func
func (t *Cmd) RegistFunc(key string, check checkFunc) {
t.checkFuncs.Set(key, check)
@@ -103,39 +57,6 @@ func (t *Cmd) UnRegistFunc(key string) {
t.checkFuncs.Pop(key)
}
// ExpectWithTimeout wait result during timeout time.
func (t *Cmd) ExpectWithTimeout(parallel bool, timeout time.Duration, keyword string) {
if keyword == "" {
return
}
okCh := make(chan struct{}, 1)
t.RegistFunc(keyword, func(buf string) {
if strings.Contains(buf, keyword) {
select {
case okCh <- struct{}{}:
default:
return
}
}
})
waitResult := func() {
defer t.UnRegistFunc(keyword)
select {
case <-okCh:
return
case <-time.After(timeout):
assert.Fail(t, fmt.Sprintf("didn't get the desired result before timeout, keyword: %s", keyword))
}
}
if parallel {
go waitResult()
} else {
waitResult()
}
}
func (t *Cmd) runCmd() {
cmd := exec.Command(t.args[0], t.args[1:]...) //nolint:gosec
cmd.Stdout = t

common/cmd/cmd_app.go

@@ -0,0 +1,114 @@
package cmd
import (
"fmt"
"os"
"os/exec"
"strings"
"time"
"github.com/docker/docker/pkg/reexec"
"github.com/stretchr/testify/assert"
)
// RunApp exec's the current binary using name as argv[0] which will trigger the
// reexec init function for that name (e.g. "geth-test" in cmd/geth/run_test.go)
func (t *Cmd) RunApp(waitResult func() bool) {
t.Log("cmd: ", append([]string{t.name}, t.args...))
cmd := &exec.Cmd{
Path: reexec.Self(),
Args: append([]string{t.name}, t.args...),
Stderr: t,
Stdout: t,
}
if waitResult != nil {
go func() {
_ = cmd.Run()
}()
waitResult()
} else {
_ = cmd.Run()
}
t.mu.Lock()
t.cmd = cmd
t.mu.Unlock()
}
// WaitExit waits until the process exits.
func (t *Cmd) WaitExit() {
// Wait until all the check funcs have finished or the test has failed.
for !(t.Failed() || t.checkFuncs.IsEmpty()) {
<-time.After(time.Millisecond * 500)
}
// Send interrupt signal.
t.mu.Lock()
_ = t.cmd.Process.Signal(os.Interrupt)
t.mu.Unlock()
}
// Interrupt sends an interrupt signal.
func (t *Cmd) Interrupt() {
t.mu.Lock()
t.Err = t.cmd.Process.Signal(os.Interrupt)
t.mu.Unlock()
}
// WaitResult returns true if the keyword is observed before the timeout expires.
func (t *Cmd) WaitResult(timeout time.Duration, keyword string) bool {
if keyword == "" {
return false
}
okCh := make(chan struct{}, 1)
t.RegistFunc(keyword, func(buf string) {
if strings.Contains(buf, keyword) {
select {
case okCh <- struct{}{}:
default:
return
}
}
})
defer t.UnRegistFunc(keyword)
select {
case <-okCh:
return true
case <-time.After(timeout):
assert.Fail(t, fmt.Sprintf("didn't get the desired result before timeout, keyword: %s", keyword))
}
return false
}
// ExpectWithTimeout waits for the keyword to appear within the timeout.
func (t *Cmd) ExpectWithTimeout(parallel bool, timeout time.Duration, keyword string) {
if keyword == "" {
return
}
okCh := make(chan struct{}, 1)
t.RegistFunc(keyword, func(buf string) {
if strings.Contains(buf, keyword) {
select {
case okCh <- struct{}{}:
default:
return
}
}
})
waitResult := func() {
defer t.UnRegistFunc(keyword)
select {
case <-okCh:
return
case <-time.After(timeout):
assert.Fail(t, fmt.Sprintf("didn't get the desired result before timeout, keyword: %s", keyword))
}
}
if parallel {
go waitResult()
} else {
waitResult()
}
}
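A minimal usage sketch for the refactored helpers, assuming a test binary that registers a hypothetical `bridge-test` entry point via docker's `reexec` package; the entry-point name, CLI flag, and readiness keyword below are all illustrative.

```go
package cmd_test

import (
	"os"
	"testing"
	"time"

	"github.com/docker/docker/pkg/reexec"

	"scroll-tech/common/cmd"
)

func TestMain(m *testing.M) {
	// Register the entry point that RunApp re-executes as argv[0].
	// "bridge-test" and the function it should wrap are hypothetical here.
	reexec.Register("bridge-test", func() { /* call the app's real main here */ })
	if reexec.Init() {
		// We are the re-executed child; the registered function already ran.
		return
	}
	os.Exit(m.Run())
}

func TestRunBridge(t *testing.T) {
	app := cmd.NewCmd(t, "bridge-test", "--log.debug") // flag is illustrative
	// Run asynchronously and block until the startup keyword appears or the wait times out.
	app.RunApp(func() bool { return app.WaitResult(10*time.Second, "Start bridge successfully") })
	defer app.WaitExit()
}
```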


@@ -0,0 +1,120 @@
package docker
import (
"context"
"fmt"
"strconv"
"strings"
"testing"
"time"
"github.com/docker/docker/api/types"
"scroll-tech/common/cmd"
"scroll-tech/common/utils"
)
// ImgRedis the redis image.
type ImgRedis struct {
image string
name string
id string
port int
running bool
cmd *cmd.Cmd
}
// NewImgRedis returns a redis image instance.
func NewImgRedis(t *testing.T, image string, port int) ImgInstance {
img := &ImgRedis{
image: image,
name: fmt.Sprintf("%s-%d", image, time.Now().Nanosecond()),
port: port,
}
img.cmd = cmd.NewCmd(t, img.name, img.prepare()...)
return img
}
// Start runs the image and checks whether it is running healthily.
func (r *ImgRedis) Start() error {
id := GetContainerID(r.name)
if id != "" {
return fmt.Errorf("container already exist, name: %s", r.name)
}
// Add check status function.
keyword := "Ready to accept connections"
okCh := make(chan struct{}, 1)
r.cmd.RegistFunc(keyword, func(buf string) {
if strings.Contains(buf, keyword) {
select {
case okCh <- struct{}{}:
default:
return
}
}
})
defer r.cmd.UnRegistFunc(keyword)
// Start redis.
r.cmd.RunCmd(true)
// Wait result of keyword.
select {
case <-okCh:
utils.TryTimes(3, func() bool {
r.id = GetContainerID(r.name)
return r.id != ""
})
case <-time.After(time.Second * 10):
}
// Set redis status.
r.running = r.id != ""
if !r.running {
_ = r.Stop()
return fmt.Errorf("failed to start image: %s", r.image)
}
return nil
}
// Stop the docker container.
func (r *ImgRedis) Stop() error {
if !r.running {
return nil
}
r.running = false
ctx := context.Background()
id := GetContainerID(r.name)
if id != "" {
timeout := time.Second * 3
if err := cli.ContainerStop(ctx, id, &timeout); err != nil {
return err
}
r.id = id
}
// remove the stopped container.
return cli.ContainerRemove(ctx, r.id, types.ContainerRemoveOptions{})
}
// Endpoint returns the connection endpoint.
func (r *ImgRedis) Endpoint() string {
if !r.running {
return ""
}
port := 6379
if r.port != 0 {
port = r.port
}
return fmt.Sprintf("redis://default:redistest@localhost:%d/0", port)
}
// docker run --name redis-xxx -p randomport:6379 --requirepass "redistest" redis
func (r *ImgRedis) prepare() []string {
cmds := []string{"docker", "run", "--name", r.name}
var ports []string
if r.port != 0 {
ports = append(ports, []string{"-p", strconv.Itoa(r.port) + ":6379"}...)
}
return append(append(cmds, ports...), r.image)
}


@@ -16,6 +16,7 @@ func TestDocker(t *testing.T) {
t.Run("testL1Geth", testL1Geth)
t.Run("testL2Geth", testL2Geth)
t.Run("testDB", testDB)
t.Run("testRedis", testRedis)
}
func testL1Geth(t *testing.T) {
@@ -57,3 +58,8 @@ func testDB(t *testing.T) {
assert.NoError(t, err)
assert.NoError(t, db.Ping())
}
func testRedis(t *testing.T) {
redisImg := NewTestRedisDocker(t)
defer redisImg.Stop()
}


@@ -33,6 +33,8 @@ func GetContainerID(name string) string {
filter := filters.NewArgs()
filter.Add("name", name)
lst, _ := cli.ContainerList(context.Background(), types.ContainerListOptions{
Latest: true,
Limit: 1,
Filters: filter,
})
if len(lst) > 0 {


@@ -6,6 +6,7 @@ import (
"math/big"
"testing"
"github.com/go-redis/redis/v8"
"github.com/jmoiron/sqlx"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/stretchr/testify/assert"
@@ -17,6 +18,7 @@ var (
l1StartPort = 10000
l2StartPort = 20000
dbStartPort = 30000
rsStartPort = 40000
)
// NewTestL1Docker starts and returns l1geth docker
@@ -76,3 +78,24 @@ func NewTestDBDocker(t *testing.T, driverName string) ImgInstance {
return imgDB
}
// NewTestRedisDocker starts a redis docker container and returns its instance.
func NewTestRedisDocker(t *testing.T) ImgInstance {
id, _ := rand.Int(rand.Reader, big.NewInt(2000))
imgRedis := NewImgRedis(t, "redis", rsStartPort+int(id.Int64()))
assert.NoError(t, imgRedis.Start())
op, err := redis.ParseURL(imgRedis.Endpoint())
assert.NoError(t, err)
if t.Failed() {
return nil
}
rdb := redis.NewClient(op)
utils.TryTimes(3, func() bool {
err = rdb.Ping(context.Background()).Err()
return err == nil
})
return imgRedis
}


@@ -4,26 +4,27 @@ go 1.18
require (
github.com/docker/docker v20.10.21+incompatible
github.com/go-redis/redis/v8 v8.11.5
github.com/jmoiron/sqlx v1.3.5
github.com/lib/pq v1.10.6
github.com/mattn/go-colorable v0.1.8
github.com/mattn/go-isatty v0.0.14
github.com/orcaman/concurrent-map v1.0.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257
github.com/stretchr/testify v1.8.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20230113082126-cdaea939622e
github.com/stretchr/testify v1.8.1
github.com/urfave/cli/v2 v2.10.2
golang.org/x/sync v0.1.0
)
require (
github.com/Microsoft/go-winio v0.6.0 // indirect
github.com/VictoriaMetrics/fastcache v1.6.0 // indirect
github.com/btcsuite/btcd v0.20.1-beta // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set v1.8.0 // indirect
github.com/deepmap/oapi-codegen v1.8.2 // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/docker/distribution v2.8.1+incompatible // indirect
github.com/docker/go-connections v0.4.0 // indirect
github.com/docker/go-units v0.5.0 // indirect
@@ -35,7 +36,7 @@ require (
github.com/go-stack/stack v1.8.0 // indirect
github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/google/go-cmp v0.5.8 // indirect
github.com/google/go-cmp v0.5.9 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/graph-gophers/graphql-go v1.3.0 // indirect
@@ -57,6 +58,7 @@ require (
github.com/moby/term v0.0.0-20220808134915-39b0c02b01ae // indirect
github.com/morikuni/aec v1.0.0 // indirect
github.com/olekukonko/tablewriter v0.0.5 // indirect
github.com/onsi/gomega v1.25.0 // indirect
github.com/opencontainers/go-digest v1.0.0 // indirect
github.com/opencontainers/image-spec v1.0.2 // indirect
github.com/opentracing/opentracing-go v1.1.0 // indirect
@@ -78,11 +80,12 @@ require (
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/crypto v0.4.0 // indirect
golang.org/x/crypto v0.5.0 // indirect
golang.org/x/mod v0.7.0 // indirect
golang.org/x/net v0.3.0 // indirect
golang.org/x/sys v0.3.0 // indirect
golang.org/x/text v0.5.0 // indirect
golang.org/x/net v0.5.0 // indirect
golang.org/x/sync v0.1.0 // indirect
golang.org/x/sys v0.4.0 // indirect
golang.org/x/text v0.6.0 // indirect
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba // indirect
golang.org/x/tools v0.3.0 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect


@@ -78,8 +78,8 @@ github.com/cespare/cp v0.1.0 h1:SE+dxFebS7Iik5LK0tsi1k9ZCxEaFX4AjQmoyA+1dJk=
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
@@ -107,6 +107,8 @@ github.com/deepmap/oapi-codegen v1.8.2 h1:SegyeYGcdi0jLLrpbCMoJxnUUn8GBXHsvr4rbz
github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
@@ -155,6 +157,8 @@ github.com/go-ole/go-ole v1.2.6 h1:/Fpf6oFPoeFik9ty7siob0G6Ke8QvQEuVcuChpwXzpY=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=
github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
github.com/go-redis/redis/v8 v8.11.5 h1:AcZZR7igkdvfVmQTPnu9WE37LRrO/YrBH5zWyjDC0oI=
github.com/go-redis/redis/v8 v8.11.5/go.mod h1:gREzHqY1hg6oD9ngVRbLStwAWKhA0FEgq8Jd4h5lpwo=
github.com/go-sourcemap/sourcemap v2.1.3+incompatible/go.mod h1:F8jJfvm2KbVjc5NqelyYJmf/v5J0dwNLS2mL4sNA1Jg=
github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE=
@@ -199,8 +203,8 @@ github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
@@ -342,20 +346,21 @@ github.com/mschoch/smat v0.0.0-20160514031455-90eadee771ae/go.mod h1:qAyveg+e4CE
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0=
github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E=
github.com/nxadm/tail v1.4.4 h1:DQuhQpB1tVlglWS2hLQ5OV6B5r8aGxSrPc5Qo6uTN78=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0 h1:2mOpI4JVVPBN+WQRa0WKH2eXR+Ey+uK4n7Zj0aYpIQA=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1 h1:o0+MgICZLuZ7xjH7Vx6zS/zcu93/BEp1VwkIW1mEXCE=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.25.0 h1:Vw7br2PCDYijJHSfBOWhov+8cAnUf8MfMaIOV323l6Y=
github.com/onsi/gomega v1.25.0/go.mod h1:r+zV744Re+DiYCIPRlYOTxn0YkOLcAnW8k1xXdMPGhM=
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.2 h1:9yCKha/T5XdGtO0q9Q9a6T5NUCsTn/DrBg0D7ufOcFM=
@@ -404,8 +409,8 @@ github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257 h1:FjBC0Ww42WRoiB5EQFxoIEcJqoEUw2twdhN9nGkVCQA=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230113082126-cdaea939622e h1:TAqAeQiQI6b+TRyqyQ6qhizqY35LhqYe8lWhG0nNRGw=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230113082126-cdaea939622e/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/zktrie v0.3.0/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
github.com/scroll-tech/zktrie v0.3.1 h1:HlR+fMBdjXX1/7cUMqpUgGEhGy/3vN1JpwQ0ovg/Ys8=
github.com/scroll-tech/zktrie v0.3.1/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
@@ -430,6 +435,7 @@ github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZL
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
@@ -437,8 +443,9 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
@@ -482,8 +489,8 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8=
golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80=
golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -541,8 +548,8 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.3.0 h1:VWL6FNY2bEEmsGVKabSlHu5Irp34xmMRoqb/9lF9lxk=
golang.org/x/net v0.3.0/go.mod h1:MBQ8lrhLObU/6UmLb4fmbmk5OcyYmqtbGd/9yIeKjEE=
golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw=
golang.org/x/net v0.5.0/go.mod h1:DivGGAXEgPSlEBzxGzZI+ZLohi+xUj054jfeKui00ws=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -606,8 +613,8 @@ golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -619,8 +626,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM=
golang.org/x/text v0.5.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -662,7 +669,6 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df h1:5Pf6pFKu98ODmgnpvkJ3kFUOQGGLIzLIkbzUHp47618=
gonum.org/v1/gonum v0.0.0-20180816165407-929014505bf4/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
gonum.org/v1/gonum v0.0.0-20181121035319-3f7ecaa7e8ca/go.mod h1:Y+Yx5eoAFn32cQvJDxZx5Dpnq+c3wtXuadVZAcxbbBo=
gonum.org/v1/gonum v0.6.0/go.mod h1:9mxDZsDKxgMAuccQkewq682L+0eCu4dCN2yonUJTCLU=


@@ -1380,7 +1380,7 @@ dependencies = [
[[package]]
name = "halo2-mpt-circuits"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/mpt-circuit.git?branch=scroll-dev-0902#b0ffab97316f9cf4b9e65aba398a047a8d6424a1"
source = "git+https://github.com/scroll-tech/mpt-circuit.git?branch=scroll-dev-0920-fix#d6bd0f291d41c4585e783d2d94a77fd80e1ba47e"
dependencies = [
"bitvec 0.22.3",
"ff 0.11.1",
@@ -1451,7 +1451,7 @@ dependencies = [
[[package]]
name = "halo2_proofs"
version = "0.2.0"
source = "git+https://github.com/scroll-tech/halo2.git?branch=scroll-dev-0902#6f18f38e82d302cd8b6ce8809b59c32350b019a3"
source = "git+https://github.com/scroll-tech/halo2.git?branch=scroll-dev-0902#6b8c8a07da9fbf5d5878831e35360b8c5e6d89a3"
dependencies = [
"blake2b_simd",
"cfg-if 0.1.10",
@@ -2397,7 +2397,7 @@ dependencies = [
[[package]]
name = "poseidon"
version = "0.2.0"
source = "git+https://github.com/appliedzkp/poseidon.git#5d29df01a95e3df6334080d28e983407f56b5da3"
source = "git+https://github.com/appliedzkp/poseidon.git#0b9965fbcd9e03559088b8f68489592286bc55e0"
dependencies = [
"group",
"halo2curves 0.2.1 (git+https://github.com/privacy-scaling-explorations/halo2curves?tag=0.3.0)",
@@ -2407,7 +2407,7 @@ dependencies = [
[[package]]
name = "poseidon"
version = "0.2.0"
source = "git+https://github.com/privacy-scaling-explorations/poseidon.git#5d29df01a95e3df6334080d28e983407f56b5da3"
source = "git+https://github.com/privacy-scaling-explorations/poseidon.git#0b9965fbcd9e03559088b8f68489592286bc55e0"
dependencies = [
"group",
"halo2curves 0.2.1 (git+https://github.com/privacy-scaling-explorations/halo2curves?tag=0.3.0)",
@@ -3389,7 +3389,7 @@ checksum = "dcf81ac59edc17cc8697ff311e8f5ef2d99fcbd9817b34cec66f90b6c3dfd987"
[[package]]
name = "types"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/common-rs#4a299c70835179be7fcf007ebb122b428d063c56"
source = "git+https://github.com/scroll-tech/scroll-zkevm?branch=fix/mpt_limit#cfe8b4e959d6e09b3a45b58e45589ae62988a729"
dependencies = [
"base64 0.13.0",
"blake2",
@@ -3816,7 +3816,7 @@ checksum = "c394b5bd0c6f669e7275d9c20aa90ae064cb22e75a1cad54e1b34088034b149f"
[[package]]
name = "zkevm"
version = "0.1.0"
source = "git+https://github.com/scroll-tech/common-rs#4a299c70835179be7fcf007ebb122b428d063c56"
source = "git+https://github.com/scroll-tech/scroll-zkevm?branch=fix/mpt_limit#cfe8b4e959d6e09b3a45b58e45589ae62988a729"
dependencies = [
"anyhow",
"blake2",


@@ -8,8 +8,8 @@ edition = "2021"
crate-type = ["staticlib"]
[dependencies]
zkevm = { git = "https://github.com/scroll-tech/common-rs" }
types = { git = "https://github.com/scroll-tech/common-rs" }
zkevm = { git = "https://github.com/scroll-tech/scroll-zkevm", branch="fix/mpt_limit" }
types = { git = "https://github.com/scroll-tech/scroll-zkevm", branch="fix/mpt_limit" }
log = "0.4"
env_logger = "0.9.0"


@@ -4,12 +4,12 @@ import (
"crypto/ecdsa"
"crypto/rand"
"encoding/hex"
"encoding/json"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/common/hexutil"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/crypto"
"github.com/scroll-tech/go-ethereum/rlp"
)
// RespStatus represents status code from roller to scroll
@@ -36,12 +36,12 @@ type AuthMsg struct {
type Identity struct {
// Roller name
Name string `json:"name"`
// Time of message creation
Timestamp int64 `json:"timestamp"`
// Unverified Unix timestamp of message creation
Timestamp uint32 `json:"timestamp"`
// Roller public key
PublicKey string `json:"publicKey"`
// Version is common.Version+ZK_VERSION. Use the following to check the latest ZK_VERSION version.
// curl -sL https://api.github.com/repos/scroll-tech/common-rs/commits | jq -r ".[0].sha"
// Version is common.Version+ZkVersion. Use the following to check the latest ZkVersion.
// curl -sL https://api.github.com/repos/scroll-tech/scroll-zkevm/commits | jq -r ".[0].sha"
Version string `json:"version"`
// Random unique token generated by manager
Token string `json:"token"`
@@ -115,12 +115,11 @@ func (a *AuthMsg) PublicKey() (string, error) {
// Hash returns the hash of the auth message, which should be the message used
// to construct the Signature.
func (i *Identity) Hash() ([]byte, error) {
bs, err := json.Marshal(i)
byt, err := rlp.EncodeToBytes(i)
if err != nil {
return nil, err
}
hash := crypto.Keccak256Hash(bs)
hash := crypto.Keccak256Hash(byt)
return hash[:], nil
}
@@ -204,12 +203,12 @@ type ProofDetail struct {
// Hash returns the hash of the proof message content.
func (z *ProofDetail) Hash() ([]byte, error) {
bs, err := json.Marshal(z)
byt, err := rlp.EncodeToBytes(z)
if err != nil {
return nil, err
}
hash := crypto.Keccak256Hash(bs)
hash := crypto.Keccak256Hash(byt)
return hash[:], nil
}
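With both hashes now computed over the RLP encoding, a sign-and-recover round trip looks roughly like the sketch below. It calls go-ethereum's `crypto` package directly; the repo's own `AuthMsg.Sign`/`Verify` wrap equivalent steps, the helper names are made up, and the package name is assumed.

```go
package message // package name assumed for illustration

import (
	"github.com/scroll-tech/go-ethereum/crypto"
)

// signIdentity hashes the RLP-encoded identity and signs the 32-byte digest
// with a secp256k1 key, returning the 65-byte [R || S || V] signature.
func signIdentity(i *Identity, privkeyHex string) ([]byte, error) {
	privkey, err := crypto.HexToECDSA(privkeyHex)
	if err != nil {
		return nil, err
	}
	hash, err := i.Hash()
	if err != nil {
		return nil, err
	}
	return crypto.Sign(hash, privkey)
}

// recoverSigner recovers the uncompressed public key bytes from a signature
// produced over the same RLP hash.
func recoverSigner(i *Identity, sig []byte) ([]byte, error) {
	hash, err := i.Hash()
	if err != nil {
		return nil, err
	}
	pub, err := crypto.SigToPub(hash, sig)
	if err != nil {
		return nil, err
	}
	return crypto.FromECDSAPub(pub), nil
}
```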


@@ -16,7 +16,7 @@ func TestAuthMessageSignAndVerify(t *testing.T) {
authMsg := &AuthMsg{
Identity: &Identity{
Name: "testRoller",
Timestamp: time.Now().UnixNano(),
Timestamp: uint32(time.Now().Unix()),
},
}
assert.NoError(t, authMsg.Sign(privkey))


@@ -1,30 +1,10 @@
package utils
import (
"sync/atomic"
"github.com/scroll-tech/go-ethereum/core/types"
"golang.org/x/sync/errgroup"
)
// ComputeTraceGasCost returns the gas used by the block, taken from the trace header.
func ComputeTraceGasCost(trace *types.BlockTrace) uint64 {
var (
gasCost uint64
eg errgroup.Group
)
for idx := range trace.ExecutionResults {
i := idx
eg.Go(func() error {
var sum uint64
for _, log := range trace.ExecutionResults[i].StructLogs {
sum += log.GasCost
}
atomic.AddUint64(&gasCost, sum)
return nil
})
}
_ = eg.Wait()
return gasCost
return trace.Header.GasUsed
}


@@ -19,13 +19,7 @@ func TestComputeTraceCost(t *testing.T) {
blockTrace := &types.BlockTrace{}
err = json.Unmarshal(templateBlockTrace, blockTrace)
assert.NoError(t, err)
var sum uint64
for _, v := range blockTrace.ExecutionResults {
for _, sv := range v.StructLogs {
sum += sv.GasCost
}
}
res := utils.ComputeTraceGasCost(blockTrace)
assert.Equal(t, sum, res)
var expected = blockTrace.Header.GasUsed
got := utils.ComputeTraceGasCost(blockTrace)
assert.Equal(t, expected, got)
}


@@ -5,7 +5,7 @@ import (
"runtime/debug"
)
var tag = "prealpha-v9.0"
var tag = "prealpha-v12.1"
var commit = func() string {
if info, ok := debug.ReadBuildInfo(); ok {
@@ -22,8 +22,8 @@ var commit = func() string {
return ""
}()
// ZK_VERSION is commit-id of common/libzkp/impl/cargo.lock/common-rs
var ZK_VERSION string
// ZkVersion is the commit id of the scroll-zkevm dependency pinned in common/libzkp/impl/cargo.lock
var ZkVersion string
// Version denotes the version of the scroll protocol, including l2geth, the relayer, coordinator, roller, contracts, etc.
var Version = fmt.Sprintf("%s-%s-%s", tag, commit, ZK_VERSION)
var Version = fmt.Sprintf("%s-%s-%s", tag, commit, ZkVersion)


@@ -232,6 +232,50 @@ function initialize(uint256 _chainId) external nonpayable
|---|---|---|
| _chainId | uint256 | undefined |
### isBlockFinalized
```solidity
function isBlockFinalized(bytes32 _blockHash) external view returns (bool)
```
Return whether the block is finalized by block hash.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _blockHash | bytes32 | undefined |
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | bool | undefined |
### isBlockFinalized
```solidity
function isBlockFinalized(uint256 _blockHeight) external view returns (bool)
```
Return whether the block is finalized by block height.
#### Parameters
| Name | Type | Description |
|---|---|---|
| _blockHeight | uint256 | undefined |
#### Returns
| Name | Type | Description |
|---|---|---|
| _0 | bool | undefined |
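The height overload is what the messenger consumes when relaying a message (see the `L1ScrollMessenger` change below). A read-only call from Go could look like the following sketch, which builds the ABI fragment from the signature documented above; the RPC endpoint and contract address are placeholders.

```go
package main

import (
	"context"
	"log"
	"math/big"
	"strings"

	"github.com/scroll-tech/go-ethereum/accounts/abi"
	"github.com/scroll-tech/go-ethereum/accounts/abi/bind"
	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/ethclient"
)

// Minimal ABI fragment for the uint256 overload, derived from the signature above.
const rollupABIFragment = `[{"inputs":[{"internalType":"uint256","name":"_blockHeight","type":"uint256"}],"name":"isBlockFinalized","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"view","type":"function"}]`

func main() {
	client, err := ethclient.Dial("http://localhost:8545") // placeholder endpoint
	if err != nil {
		log.Fatal(err)
	}
	parsed, err := abi.JSON(strings.NewReader(rollupABIFragment))
	if err != nil {
		log.Fatal(err)
	}
	rollupAddr := common.HexToAddress("0x0") // placeholder rollup address
	contract := bind.NewBoundContract(rollupAddr, parsed, client, client, client)

	var out []interface{}
	err = contract.Call(&bind.CallOpts{Context: context.Background()}, &out, "isBlockFinalized", big.NewInt(1234))
	if err != nil {
		log.Fatal(err)
	}
	log.Println("finalized:", out[0].(bool))
}
```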
### lastFinalizedBatchID
```solidity


@@ -97,7 +97,7 @@ contract L1ScrollMessenger is OwnableUpgradeable, PausableUpgradeable, ScrollMes
require(!isMessageExecuted[_msghash], "Message successfully executed");
// @todo check proof
require(IZKRollup(rollup).verifyMessageStateProof(_proof.batchIndex, _proof.blockHeight), "invalid state proof");
require(IZKRollup(rollup).isBlockFinalized(_proof.blockHeight), "invalid state proof");
require(ZkTrieVerifier.verifyMerkleProof(_proof.merkleProof), "invalid proof");
// @todo check `_to` address to avoid attack.


@@ -386,7 +386,7 @@ library RollupVerifier {
t1
)
);
update_hash_scalar(7326291674247555594112707886804937707847188185923070866278273345303869756280, absorbing, 0);
update_hash_scalar(18620528901694425296072105892920066495478887717015933899493919566746585676047, absorbing, 0);
update_hash_point(m[0], m[1], absorbing, 2);
for (t0 = 0; t0 <= 4; t0++) {
update_hash_point(proof[0 + t0 * 2], proof[1 + t0 * 2], absorbing, 5 + t0 * 3);
@@ -724,8 +724,8 @@ library RollupVerifier {
(t0, t1) = (ecc_mul_add_pm(m, proof, 1461486238301980199876269201563775120819706402602, t0, t1));
(t0, t1) = (
ecc_mul_add(
18701609130775737229348071043080155034023979562517390395403433088802478899758,
15966955543930185772599298905781740007968379271659670990460125132276790404701,
5335172776193682293002595672140655300498265857728161236987288793112411362256,
9153855726472286104461915396077745889260526805920405949557461469033032628222,
m[78],
t0,
t1
@@ -733,8 +733,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
10391672869328159104536012527288890078475214572275421477472198141744100604180,
16383182967525077486800851500412772270268328143041811261940514978333847876450,
9026202793013131831701482540600751978141377016764300618037152689098701087208,
19644677619301694001087044142922327551116787792977369058786364247421954485859,
m[77],
t0,
t1
@@ -742,8 +742,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
1694121668121560366967381814358868176695875056710903754887787227675156636991,
6288755472313871386012926867179622380057563139110460659328016508371672965822,
10826234859452509771814283128042282248838241144105945602706734900173561719624,
5628243352113405764051108388315822074832358861640908064883601198703833923438,
m[76],
t0,
t1
@@ -751,8 +751,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
8449090587209846475328734419746789925412190193479844231777165308243174237722,
19620423218491500875965944829407986067794157844846402182805878618955604592848,
9833916648960859819503777242562918959056952519298917148524233826817297321072,
837915750759756490172805968793319594111899487492554675680829218939384285955,
m[75],
t0,
t1
@@ -760,8 +760,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
5053208336959682582031156680199539869251745263409434673229644546747696847142,
2515271708296970065769200367712058290268116287798438948140802173656220671206,
10257805671474982710489846158410183388099935223468876792311814484878286190506,
6925081619093494730602614238209964215162532591387952125009817011864359314464,
m[74],
t0,
t1
@@ -769,8 +769,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
14044565934581841113280816557133159251170886931106151374890478449607604267942,
4516676687937794780030405510740994119381246893674971835541700695978704585552,
4475887208248126488081900175351981014160135345959097965081514547035591501401,
17809934801579097157548855239127693133451078551727048660674021788322026074440,
m[73],
t0,
t1
@@ -823,8 +823,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
4919836553908828082540426444868776555669883964231731088484431671272015675682,
2534996469663628472218664436969797350677809756735321673130157881813913441609,
18539453526841971932568089122596968064597086391356856358866942118522457107863,
3647865108410881496134024808028560930237661296032155096209994441023206530212,
m[67],
t0,
t1
@@ -841,8 +841,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
7298741378311576950839968993357330108079245118485170808123459961337830256312,
10327561179499117619949936626306234488421661318541529469701192193684736307992,
19267653195273486172950176174654469275684545789180737280515385961619717720594,
8975180971271331994178632284567744253406636398050840906044549681238954521839,
m[65],
t0,
t1
@@ -859,8 +859,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
21344975294019301064497004820288763682448968861642019035490416932201272957274,
10527619823264344893410550194287064640208153251186939130321425213582959780489,
14883199025754033315476980677331963148201112555947054150371482532558947065890,
19913319410736467436640597337700981504577668548125107926660028143291852201132,
m[63],
t0,
t1
@@ -868,8 +868,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
8972742415650205333409282370033440562593431348747288268814492203356823531160,
8116706321112691122771049432546166822575953322170688547310064134261753771143,
18290509522533712126835038141804610778392690327965261406132668236833728306838,
3710298066677974093924183129147170087104393961634393354172472701713090868425,
m[62],
t0,
t1
@@ -877,8 +877,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
2245383788954722547301665173770198299224442299145553661157120655982065376923,
21429627532145565836455474503387893562363999035988060101286707048187310790834,
19363467492491917195052183458025198134822377737629876295496853723068679518308,
5854330679906778271391785925618350923591828884998994352880284635518306250788,
m[61],
t0,
t1
@@ -886,8 +886,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
6631831869726773976361406817204839637256208337970281843457872807848960103655,
9564029493986604546558813596663080644256762699468834511701525072767927949801,
16181109780595136982201896766265118193466959602989950846464420951358063185297,
8811570609098296287610981932552574275858846837699446990256241563674576678567,
m[60],
t0,
t1
@@ -895,8 +895,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
11480433023546787855799302686493624232665854025790899812568432142639901048711,
19408335616099148180409133533838326787843523379558500985213116784449716389602,
10062549363619405779400496848029040530770586215674909583260224093983878118724,
1989582851705118987083736605676322092152792129388805756040224163519806904905,
m[59],
t0,
t1
@@ -904,8 +904,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
17119009547436104907589161251911916154539209413889810725547125453954285498068,
16196009614025712805558792610177918739658373559330006740051047693948800191562,
19016883348721334939078393300672242978266248861945205052660538073783955572863,
1976040209279107904310062622264754919366006151976304093568644070161390236037,
m[58],
t0,
t1
@@ -922,8 +922,8 @@ library RollupVerifier {
);
(t0, t1) = (
ecc_mul_add(
18650010323993268535055713787599480879302828622769515272251129462854128226895,
11244246887388549559894193327128701737108444364011850111062992666532968469107,
10610438624239085062445835373411523076517149007370367578847561825933262473434,
14538778619212692682166219259545768162136692909816914624880992580957990166795,
m[56],
t0,
t1


@@ -5,9 +5,9 @@ IMAGE_VERSION=latest
REPO_ROOT_DIR=./..
ifeq (4.3,$(firstword $(sort $(MAKE_VERSION) 4.3)))
ZK_VERSION=$(shell grep -m 1 "common-rs" ../common/libzkp/impl/Cargo.lock | cut -d "#" -f2 | cut -c-7)
ZK_VERSION=$(shell grep -m 1 "scroll-zkevm" ../common/libzkp/impl/Cargo.lock | cut -d "#" -f2 | cut -c-7)
else
ZK_VERSION=$(shell grep -m 1 "common-rs" ../common/libzkp/impl/Cargo.lock | cut -d "\#" -f2 | cut -c-7)
ZK_VERSION=$(shell grep -m 1 "scroll-zkevm" ../common/libzkp/impl/Cargo.lock | cut -d "\#" -f2 | cut -c-7)
endif
test:
@@ -15,10 +15,10 @@ test:
libzkp:
cd ../common/libzkp/impl && cargo build --release && cp ./target/release/libzkp.a ../interface/
cp -r ../common/libzkp/interface ./verifier/lib
rm -rf ./verifier/lib && cp -r ../common/libzkp/interface ./verifier/lib
coordinator: libzkp ## Builds the Coordinator instance.
go build -ldflags "-X scroll-tech/common/version.ZK_VERSION=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator ./cmd
go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/coordinator ./cmd
test-verifier: libzkp
go test -tags ffi -timeout 0 -v ./verifier


@@ -74,7 +74,7 @@ func (m *Manager) Register(ctx context.Context, authMsg *message.AuthMsg) (*rpc.
go func() {
defer func() {
m.freeRoller(pubkey)
log.Info("roller unregister", "name", authMsg.Identity.Name)
log.Info("roller unregister", "name", authMsg.Identity.Name, "pubkey", pubkey)
}()
for {
@@ -82,14 +82,14 @@ func (m *Manager) Register(ctx context.Context, authMsg *message.AuthMsg) (*rpc.
case task := <-taskCh:
notifier.Notify(rpcSub.ID, task) //nolint
case err := <-rpcSub.Err():
log.Warn("client stopped the ws connection", "err", err)
log.Warn("client stopped the ws connection", "name", authMsg.Identity.Name, "pubkey", pubkey, "err", err)
return
case <-notifier.Closed():
return
}
}
}()
log.Info("roller register", "name", authMsg.Identity.Name, "version", authMsg.Identity.Version)
log.Info("roller register", "name", authMsg.Identity.Name, "pubkey", pubkey, "version", authMsg.Identity.Version)
return rpcSub, nil
}
@@ -116,6 +116,5 @@ func (m *Manager) SubmitProof(proof *message.ProofMsg) (bool, error) {
}
defer m.freeTaskIDForRoller(pubkey, proof.ID)
log.Info("Received zk proof", "proof id", proof.ID, "result", true)
return true, nil
}

File diff suppressed because one or more lines are too long

Binary file not shown.


@@ -5,8 +5,6 @@ import (
"os"
"os/signal"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/urfave/cli/v2"
@@ -55,13 +53,8 @@ func action(ctx *cli.Context) error {
log.Crit("failed to init db connection", "err", err)
}
client, err := ethclient.Dial(cfg.L2Config.Endpoint)
if err != nil {
return err
}
// Initialize all coordinator modules.
rollerManager, err := coordinator.New(ctx.Context, cfg.RollerManagerConfig, ormFactory, client)
rollerManager, err := coordinator.New(ctx.Context, cfg.RollerManagerConfig, ormFactory)
if err != nil {
return err
}
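With the ethclient removed from the constructor, the coordinator's start-up reduces to config plus the ORM factory; block traces are fetched from the database later (see `GetBlockTraces` in the manager diff below). A fragment sketch of the post-change wiring inside `action` — the `database.NewOrmFactory` call is an assumption based on how the tests build their ORM, and the surrounding cli setup is omitted:

```go
// Init the ORM factory from the (persistence + redis) db config.
ormFactory, err := database.NewOrmFactory(cfg.DBConfig)
if err != nil {
	log.Crit("failed to init db connection", "err", err)
}

// Initialize all coordinator modules; no l2geth client is passed anymore.
rollerManager, err := coordinator.New(ctx.Context, cfg.RollerManagerConfig, ormFactory)
if err != nil {
	return err
}
```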


@@ -15,5 +15,5 @@ func TestRunCoordinator(t *testing.T) {
// wait result
coordinator.ExpectWithTimeout(true, time.Second*3, fmt.Sprintf("coordinator version %s", version.Version))
coordinator.RunApp(false)
coordinator.RunApp(nil)
}


@@ -11,10 +11,15 @@
}
},
"db_config": {
"driver_name": "postgres",
"dsn": "postgres://admin:123456@localhost/test?sslmode=disable"
},
"l2_config": {
"endpoint": "/var/lib/jenkins/workspace/SequencerPipeline/MyPrivateNetwork/geth.ipc"
"persistence": {
"driver_name": "postgres",
"dsn": "postgres://admin:123456@localhost/test?sslmode=disable"
},
"redis": {
"url": "redis://default:@localhost:6379/0",
"expirations": {
"trace": 3600
}
}
}
}
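The database configuration is now split into a `persistence` block (the SQL connection) and a `redis` block (the trace cache). A sketch of the Go types this JSON presumably maps to — the field and type names are assumptions inferred from the keys above and from `cfg.DBConfig.Persistence` / `cfg.DBConfig.Redis` used in the coordinator tests below; the cache package's own `RedisConfig` (in `database/cache/redis.go` at the end of this diff) may be the concrete redis type:

```go
package database

// PersistenceConfig holds the SQL connection settings (assumed name).
type PersistenceConfig struct {
	DriverName string `json:"driver_name"`
	DSN        string `json:"dsn"`
}

// RedisConfig holds the redis cache settings; expirations are per-kind
// TTLs in seconds, e.g. {"trace": 3600}.
type RedisConfig struct {
	URL         string           `json:"url"`
	Expirations map[string]int64 `json:"expirations"`
}

// DBConfig combines both backends.
type DBConfig struct {
	Persistence *PersistenceConfig `json:"persistence"`
	Redis       *RedisConfig       `json:"redis"`
}
```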


@@ -24,17 +24,10 @@ type RollerManagerConfig struct {
TokenTimeToLive int `json:"token_time_to_live"`
}
// L2Config loads l2geth configuration items.
type L2Config struct {
// l2geth node url.
Endpoint string `json:"endpoint"`
}
// Config load configuration items.
type Config struct {
RollerManagerConfig *RollerManagerConfig `json:"roller_manager_config"`
DBConfig *db_config.DBConfig `json:"db_config"`
L2Config *L2Config `json:"l2_config"`
}
// VerifierConfig load zk verifier config.


@@ -5,27 +5,24 @@ go 1.18
require (
github.com/orcaman/concurrent-map v1.0.0
github.com/patrickmn/go-cache v2.1.0+incompatible
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257
github.com/stretchr/testify v1.8.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20230113082126-cdaea939622e
github.com/stretchr/testify v1.8.1
github.com/urfave/cli/v2 v2.10.2
golang.org/x/sync v0.1.0
)
require (
github.com/btcsuite/btcd v0.20.1-beta // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/deckarep/golang-set v1.8.0 // indirect
github.com/ethereum/go-ethereum v1.10.26 // indirect
github.com/go-ole/go-ole v1.2.6 // indirect
github.com/go-stack/stack v1.8.0 // indirect
github.com/google/uuid v1.3.0 // indirect
github.com/gorilla/websocket v1.5.0 // indirect
github.com/iden3/go-iden3-crypto v0.0.13 // indirect
github.com/kr/pretty v0.3.0 // indirect
github.com/mattn/go-isatty v0.0.14 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/rogpeppe/go-internal v1.8.1 // indirect
github.com/russross/blackfriday/v2 v2.1.0 // indirect
@@ -35,8 +32,8 @@ require (
github.com/tklauser/numcpus v0.4.0 // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/crypto v0.4.0 // indirect
golang.org/x/sys v0.3.0 // indirect
golang.org/x/crypto v0.5.0 // indirect
golang.org/x/sys v0.4.0 // indirect
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)


@@ -72,8 +72,8 @@ github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
@@ -105,7 +105,6 @@ github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r
github.com/dop251/goja v0.0.0-20211011172007-d99e4b8cbf48/go.mod h1:R9ET47fwRVRPZnOGvHxxhuZcbrMCuiqOz3Rlrh4KSnk=
github.com/dop251/goja_nodejs v0.0.0-20210225215109-d91c329300e7/go.mod h1:hn7BA7c8pLvoGndExHudxTDKZ84Pyvv+90pbBjbTz0Y=
github.com/eclipse/paho.mqtt.golang v1.2.0/go.mod h1:H9keYFcgq3Qr5OUJm/JZI/i6U7joQ8SYLhZwfeOo6Ts=
github.com/edsrzf/mmap-go v1.0.0 h1:CEBF7HpRnUCSJgGUb5h1Gm7e3VkmVDrR8lvWVLtrOFw=
github.com/edsrzf/mmap-go v1.0.0/go.mod h1:YO35OhQPt3KJa3ryjFM5Bs14WD66h8eGKpfaBNrHW5M=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
@@ -113,12 +112,10 @@ github.com/ethereum/go-ethereum v1.10.13/go.mod h1:W3yfrFyL9C1pHcwY5hmRHVDaorTiQ
github.com/ethereum/go-ethereum v1.10.26 h1:i/7d9RBBwiXCEuyduBQzJw/mKmnvzsN14jqBmytw72s=
github.com/ethereum/go-ethereum v1.10.26/go.mod h1:EYFyF19u3ezGLD4RqOkLq+ZCXzYbLoNDdZlMt7kyKFg=
github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5KwzbycvMj4=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5 h1:FtmdgXiUlNeRsoNMFlKLDt+S+6hbjVMEW6RGQ7aUf7c=
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff h1:tY80oXqGNY4FhTFhk+o9oFHGINQ/+vhlm8HFzi6znCI=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
github.com/getkin/kin-openapi v0.61.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
@@ -182,8 +179,6 @@ github.com/google/pprof v0.0.0-20190515194954-54271f7e092f/go.mod h1:zfwlbNMJ+OI
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.5/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I=
github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/gopherjs/gopherjs v0.0.0-20181017120253-0766667cb4d1/go.mod h1:wJfORRmW1u3UXTncJ5qlYoELFm8eSnnEO6hX4iZ3EWY=
@@ -192,19 +187,15 @@ github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/ad
github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWmnc=
github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
github.com/graph-gophers/graphql-go v0.0.0-20201113091052-beb923fada29/go.mod h1:9CQHMSxwO4MprSdzoIEobiHpoLtHm77vfxsvsIN5Vuc=
github.com/hashicorp/go-bexpr v0.1.10 h1:9kuI5PFotCboP3dkDYFr/wi0gg0QVbSNz5oFRpxn4uE=
github.com/hashicorp/go-bexpr v0.1.10/go.mod h1:oxlubA2vC/gFVfX1A6JGp7ls7uCDlfJn732ehYYg+g0=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d h1:dg1dEPuWpEqDnvIw251EVy4zlP8gWbsGj4BsUKCRpYs=
github.com/hashicorp/golang-lru v0.5.5-0.20210104140557-80c98217689d/go.mod h1:iADmTwqILo4mZ8BN3D2Q6+9jd8WM5uGBxy+E8yxSoD4=
github.com/holiman/bloomfilter/v2 v2.0.3 h1:73e0e/V0tCydx14a0SCYS/EWCxgwLZ18CZcZKVu0fao=
github.com/holiman/bloomfilter/v2 v2.0.3/go.mod h1:zpoh+gs7qcpqrHr3dB55AMiJwo0iURXE7ZOP9L9hSkA=
github.com/holiman/uint256 v1.2.0 h1:gpSYcPLWGv4sG43I2mVLiDZCNDh/EpGjSk8tmtxitHM=
github.com/holiman/uint256 v1.2.0/go.mod h1:y4ga/t+u+Xwd7CpDgZESaRcWy0I7XMlTMA25ApIH5Jw=
github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU=
github.com/huin/goupnp v1.0.2/go.mod h1:0dxJBVBHqTMjIUMkESDTNgOOx/Mw5wYIfyFmdzSamkM=
github.com/huin/goupnp v1.0.3 h1:N8No57ls+MnjlB+JPiCVSOyy/ot7MJTqlo7rn+NYSqQ=
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150/go.mod h1:PpLOETDnJ0o3iZrZfqZzyLl6l7F3c6L1oWn7OICBi6o=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/iden3/go-iden3-crypto v0.0.12/go.mod h1:swXIv0HFbJKobbQBtsB50G7IHr6PbTowutSew/iBEoo=
@@ -223,7 +214,6 @@ github.com/influxdata/roaring v0.4.13-0.20180809181101-fc520f41fab6/go.mod h1:bS
github.com/influxdata/tdigest v0.0.0-20181121200506-bf2b5ad3c0a9/go.mod h1:Js0mqiSBE6Ffsg94weZZ2c+v/ciT8QRHFOap7EKDrR0=
github.com/influxdata/usage-client v0.0.0-20160829180054-6d3895376368/go.mod h1:Wbbw6tYNvwa5dlB6304Sd+82Z3f7PmVZHVKU637d4po=
github.com/jackpal/go-nat-pmp v1.0.2-0.20160603034137-1fa385a6f458/go.mod h1:QPH045xvCAeXUZOxsnwmrtiCoxIr9eob+4orBN1SBKc=
github.com/jackpal/go-nat-pmp v1.0.2 h1:KzKSgb7qkJvOUTqYl9/Hg/me3pWgBmERKrTGD7BdWus=
github.com/jedisct1/go-minisign v0.0.0-20190909160543-45766022959e/go.mod h1:G1CVv03EnqU1wYL2dFwXxW2An0az9JTl/ZsqXQeBlkU=
github.com/jessevdk/go-flags v0.0.0-20141203071132-1679536dcc89/go.mod h1:4FA24M0QyGHXBuZZK/XkWh8h0e1EYbRYJSGM75WSRxI=
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
@@ -267,7 +257,6 @@ github.com/matryer/moq v0.0.0-20190312154309-6cfb0558e1bd/go.mod h1:9ELz6aaclSIG
github.com/mattn/go-colorable v0.0.9/go.mod h1:9vuHe8Xs5qXnSaW/c/ABM9alt+Vo+STaOChaDxuIBZU=
github.com/mattn/go-colorable v0.1.2/go.mod h1:U0ppj6V5qS13XJ6of8GYAs25YV2eR4EVcfRqFIhoBtE=
github.com/mattn/go-colorable v0.1.7/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-colorable v0.1.8 h1:c1ghPdyEDarC70ftn0y+A/Ee++9zz8ljHG1b13eJ0s8=
github.com/mattn/go-colorable v0.1.8/go.mod h1:u6P/XSegPjTcexA+o6vUJrdnUu04hMope9wVRipJSqc=
github.com/mattn/go-ieproxy v0.0.0-20190610004146-91bb50d98149/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
github.com/mattn/go-ieproxy v0.0.0-20190702010315-6dee0af9227d/go.mod h1:31jz6HNzdxOmlERGGEc4v/dMssOfmp2p5bT/okiKFFc=
@@ -275,8 +264,6 @@ github.com/mattn/go-isatty v0.0.4/go.mod h1:M+lRXTBqGeGNdLjl/ufCoiOlB5xdOkqRJdNx
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU=
github.com/mattn/go-isatty v0.0.14 h1:yVuAays6BHfxijgZPzw+3Zlu5yQgKGP2/hcQbHb7S9Y=
github.com/mattn/go-isatty v0.0.14/go.mod h1:7GGIvUiUoEMVVmxf/4nioHXj79iQHKdU27kJ6hsGG94=
github.com/mattn/go-runewidth v0.0.3/go.mod h1:LwmH8dsx7+W8Uxz3IHJYH5QSwggIsqBzpuz5H//U1FU=
github.com/mattn/go-runewidth v0.0.9 h1:Lm995f3rfxdpd6TSmuVCHVb/QhupuXlYr8sCI/QdE+0=
github.com/mattn/go-runewidth v0.0.9/go.mod h1:H031xJmbD/WCDINGzjvQ9THkh0rPKHF+m2gUSrubnMI=
@@ -284,9 +271,6 @@ github.com/mattn/go-sqlite3 v1.11.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsO
github.com/mattn/go-tty v0.0.0-20180907095812-13ff1204f104/go.mod h1:XPvLUNfbS4fJH25nqRHfWLMa1ONC8Amw+mIA639KxkE=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/pointerstructure v1.2.0 h1:O+i9nHnXS3l/9Wu7r4NrEdwA2VFTicjUEN1uBnDo34A=
github.com/mitchellh/pointerstructure v1.2.0/go.mod h1:BRAsLI5zgXmw97Lf6s25bs8ohIXc3tViBH44KcwB2g4=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
@@ -338,19 +322,17 @@ github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsT
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc=
github.com/rjeczalik/notify v0.9.1 h1:CLCKso/QK1snAlnhNR/CNvNiFU2saUtjV0bx3EwNeCE=
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XFkP+Eg=
github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o=
github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257 h1:FjBC0Ww42WRoiB5EQFxoIEcJqoEUw2twdhN9nGkVCQA=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230113082126-cdaea939622e h1:TAqAeQiQI6b+TRyqyQ6qhizqY35LhqYe8lWhG0nNRGw=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230113082126-cdaea939622e/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/zktrie v0.3.0/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
github.com/scroll-tech/zktrie v0.3.1 h1:HlR+fMBdjXX1/7cUMqpUgGEhGy/3vN1JpwQ0ovg/Ys8=
github.com/scroll-tech/zktrie v0.3.1/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
@@ -368,11 +350,11 @@ github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasO
github.com/spf13/cast v1.3.0/go.mod h1:Qx5cxh0v+4UWYiBimWS+eyWzqEqokIECu5etghLkUJE=
github.com/spf13/cobra v0.0.3/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
github.com/spf13/pflag v1.0.3/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4 h1:Gb2Tyox57NRNuZ2d3rmvB3pcmbu7O1RS3m8WRx7ilrg=
github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZLeN1LMWmRsyYjvAu+I6Dm9QmlDaIIt+Y+4Kd7Tp+Q=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
@@ -380,8 +362,9 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
@@ -391,7 +374,6 @@ github.com/tklauser/go-sysconf v0.3.10/go.mod h1:C8XykCvCb+Gn0oNCWPIlcb0RuglQTYa
github.com/tklauser/numcpus v0.2.2/go.mod h1:x3qojaO3uyYt0i56EW/VUYs7uBvdl2fkfZFu0T9wgjM=
github.com/tklauser/numcpus v0.4.0 h1:E53Dm1HjH1/R2/aoCtXtPgzmElmn51aOkhCFSuZq//o=
github.com/tklauser/numcpus v0.4.0/go.mod h1:1+UI3pD8NW14VMwdgJNJ1ESk2UnwhAnz5hMwiKKqXCQ=
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef h1:wHSqTBrZW24CsNJDfeh9Ex6Pm0Rcpc7qrgKBiL44vF4=
github.com/tyler-smith/go-bip39 v1.0.1-0.20181017060643-dbb3b84ba2ef/go.mod h1:sJ5fKU0s6JVwZjjcUEX2zFOnvq0ASQ2K9Zr6cf67kNs=
github.com/urfave/cli/v2 v2.3.0/go.mod h1:LJmUH05zAU44vOAcrfzZQKsZbVcdbOG8rtL3/XcUArI=
github.com/urfave/cli/v2 v2.10.2 h1:x3p8awjp/2arX+Nl/G2040AZpOCHS/eMJJ1/a+mye4Y=
@@ -424,8 +406,8 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8=
golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80=
golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -534,14 +516,13 @@ golang.org/x/sys v0.0.0-20210324051608-47abb6519492/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210420205809-ac73e9fd8988/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -553,11 +534,9 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.5.0 h1:OLmvp0KP+FVG99Ct/qFiL/Fhk4zp4QQnZ7b2U+5piUM=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba h1:O8mE0/t419eoIwhTFpKVkHiTs/Igowgfkj25AcZrtiE=
golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/tools v0.0.0-20180525024113-a5b4c53f6e8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
@@ -644,7 +623,6 @@ gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce h1:+JknDZhAj8YMt7
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/urfave/cli.v1 v1.20.0 h1:NdAVW6RYxDif9DhDHaAortIu956m2c0v+09AZBPTbE0=
gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=


@@ -11,9 +11,6 @@ import (
cmap "github.com/orcaman/concurrent-map"
"github.com/patrickmn/go-cache"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/ethclient"
"github.com/scroll-tech/go-ethereum/log"
"github.com/scroll-tech/go-ethereum/rpc"
@@ -65,7 +62,6 @@ type Manager struct {
// A map containing proof failed or verify failed proof.
rollerPool cmap.ConcurrentMap
// TODO: once put into use, should add to graceful restart.
failedSessionInfos map[string]*SessionInfo
// A direct connection to the Halo2 verifier, used to verify
@@ -75,9 +71,6 @@ type Manager struct {
// db interface
orm database.OrmFactory
// l2geth client
*ethclient.Client
// Token cache
tokenCache *cache.Cache
// A mutex guarding registration
@@ -86,7 +79,7 @@ type Manager struct {
// New returns a new instance of Manager. The instance will be not fully prepared,
// and still needs to be finalized and ran by calling `manager.Start`.
func New(ctx context.Context, cfg *config.RollerManagerConfig, orm database.OrmFactory, client *ethclient.Client) (*Manager, error) {
func New(ctx context.Context, cfg *config.RollerManagerConfig, orm database.OrmFactory) (*Manager, error) {
v, err := verifier.NewVerifier(cfg.Verifier)
if err != nil {
return nil, err
@@ -101,7 +94,6 @@ func New(ctx context.Context, cfg *config.RollerManagerConfig, orm database.OrmF
failedSessionInfos: make(map[string]*SessionInfo),
verifier: v,
orm: orm,
Client: client,
tokenCache: cache.New(time.Duration(cfg.TokenTimeToLive)*time.Second, 1*time.Hour),
}, nil
}
@@ -195,8 +187,18 @@ func (m *Manager) restorePrevSessions() {
finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
}
m.sessions[sess.info.ID] = sess
log.Info("Coordinator restart reload sessions", "ID", sess.info.ID, "sess", sess.info)
go m.CollectProofs(sess.info.ID, sess)
log.Info("Coordinator restart reload sessions", "session start time", time.Unix(sess.info.StartTimestamp, 0))
for _, roller := range sess.info.Rollers {
log.Info(
"restore roller info for session",
"session id", sess.info.ID,
"roller name", roller.Name,
"public key", roller.PublicKey,
"proof status", roller.Status)
}
go m.CollectProofs(sess)
}
}
}
@@ -220,17 +222,29 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
proofTimeSec := uint64(time.Since(time.Unix(sess.info.StartTimestamp, 0)).Seconds())
// Ensure this roller is eligible to participate in the session.
if roller, ok := sess.info.Rollers[pk]; !ok {
return fmt.Errorf("roller %s is not eligible to partake in proof session %v", pk, msg.ID)
} else if roller.Status == orm.RollerProofValid {
roller, ok := sess.info.Rollers[pk]
if !ok {
return fmt.Errorf("roller %s (%s) is not eligible to partake in proof session %v", roller.Name, roller.PublicKey, msg.ID)
}
if roller.Status == orm.RollerProofValid {
// In order to prevent DoS attacks, it is forbidden to repeatedly submit valid proofs.
// TODO: Defend invalid proof resubmissions by one of the following two methods:
// (i) slash the roller for each submission of invalid proof
// (ii) set the maximum failure retry times
log.Warn("roller has already submitted valid proof in proof session", "roller", pk, "proof id", msg.ID)
log.Warn(
"roller has already submitted valid proof in proof session",
"roller name", roller.Name,
"roller pk", roller.PublicKey,
"proof id", msg.ID,
)
return nil
}
log.Info("Received zk proof", "proof id", msg.ID)
log.Info(
"handling zk proof",
"proof id", msg.ID,
"roller name", roller.Name,
"roller pk", roller.PublicKey,
)
defer func() {
// TODO: maybe we should use db tx for the whole process?
@@ -250,12 +264,13 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
}()
if msg.Status != message.StatusOk {
log.Error("Roller failed to generate proof", "msg.ID", msg.ID, "error", msg.Error)
if dbErr = m.orm.UpdateProvingStatus(msg.ID, orm.ProvingTaskFailed); dbErr != nil {
log.Error("failed to update task status as failed", "error", dbErr)
}
// record the failed session.
m.addFailedSession(sess, msg.Error)
log.Error(
"Roller failed to generate proof",
"msg.ID", msg.ID,
"roller name", roller.Name,
"roller pk", roller.PublicKey,
"error", msg.Error,
)
return nil
}
@@ -280,8 +295,6 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
success, err = m.verifier.VerifyProof(msg.Proof)
if err != nil {
// record failed session.
m.addFailedSession(sess, err.Error())
// TODO: this is only a temp workaround for testnet, we should return err in real cases
success = false
log.Error("Failed to verify zk proof", "proof id", msg.ID, "error", err)
@@ -290,35 +303,31 @@ func (m *Manager) handleZkProof(pk string, msg *message.ProofDetail) error {
log.Info("Verify zk proof successfully", "verification result", success, "proof id", msg.ID)
}
var status orm.ProvingStatus
if success {
status = orm.ProvingTaskVerified
} else {
// Set status as skipped if verification fails.
// Note that this is only a workaround for testnet here.
// TODO: In real cases we should reset to orm.ProvingTaskUnassigned
// so as to re-distribute the task in the future
status = orm.ProvingTaskFailed
if dbErr = m.orm.UpdateProvingStatus(msg.ID, orm.ProvingTaskVerified); dbErr != nil {
log.Error(
"failed to update proving_status",
"msg.ID", msg.ID,
"status", orm.ProvingTaskVerified,
"error", dbErr)
}
return dbErr
}
if dbErr = m.orm.UpdateProvingStatus(msg.ID, status); dbErr != nil {
log.Error("failed to update proving_status", "msg.ID", msg.ID, "status", status, "error", dbErr)
}
return dbErr
return nil
}
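Because this view drops the +/- markers, the reworked body of `handleZkProof` is hard to follow. A rough fragment sketch of the post-change flow (`success` and `dbErr` are declared earlier in the function, as in the hunk; the proof-persistence step in the middle is elided; in the not-eligible branch the roller entry does not exist, so the public key itself is used in the message):

```go
// Eligibility check, flattened from the previous if/else-if chain.
roller, ok := sess.info.Rollers[pk]
if !ok {
	return fmt.Errorf("roller %s is not eligible to partake in proof session %v", pk, msg.ID)
}
if roller.Status == orm.RollerProofValid {
	// Ignore repeated submissions of an already-accepted proof (anti-DoS).
	log.Warn("roller has already submitted valid proof in proof session",
		"roller name", roller.Name, "roller pk", roller.PublicKey, "proof id", msg.ID)
	return nil
}

// ... proof persisted, then verified ...
success, err = m.verifier.VerifyProof(msg.Proof)
if err != nil {
	// Temporary testnet workaround: treat verifier errors as failed proofs.
	success = false
	log.Error("Failed to verify zk proof", "proof id", msg.ID, "error", err)
}

// Only a successful verification updates the task status here; invalid
// proofs are left to CollectProofs, which fails the task only once every
// roller in the session has submitted an invalid proof.
if success {
	if dbErr = m.orm.UpdateProvingStatus(msg.ID, orm.ProvingTaskVerified); dbErr != nil {
		log.Error("failed to update proving_status",
			"msg.ID", msg.ID, "status", orm.ProvingTaskVerified, "error", dbErr)
	}
	return dbErr
}
return nil
```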
// CollectProofs collects proofs corresponding to a proof generation session.
func (m *Manager) CollectProofs(id string, sess *session) {
timer := time.NewTimer(time.Duration(m.cfg.CollectionTime) * time.Minute)
func (m *Manager) CollectProofs(sess *session) {
for {
select {
case <-timer.C:
case <-time.After(time.Duration(m.cfg.CollectionTime) * time.Minute):
m.mu.Lock()
// Ensure proper clean-up of resources.
defer func() {
delete(m.sessions, id)
// TODO: remove the clean-up, rollers report healthy status.
for pk := range sess.info.Rollers {
m.freeTaskIDForRoller(pk, sess.info.ID)
}
delete(m.sessions, sess.info.ID)
m.mu.Unlock()
}()
@@ -335,13 +344,13 @@ func (m *Manager) CollectProofs(id string, sess *session) {
// record failed session.
errMsg := "proof generation session ended without receiving any valid proofs"
m.addFailedSession(sess, errMsg)
log.Warn(errMsg, "session id", id)
log.Warn(errMsg, "session id", sess.info.ID)
// Set status as skipped.
// Note that this is only a workaround for testnet here.
// TODO: In real cases we should reset to orm.ProvingTaskUnassigned
// so as to re-distribute the task in the future
if err := m.orm.UpdateProvingStatus(id, orm.ProvingTaskFailed); err != nil {
log.Error("fail to reset task_status as Unassigned", "id", id, "err", err)
if err := m.orm.UpdateProvingStatus(sess.info.ID, orm.ProvingTaskFailed); err != nil {
log.Error("fail to reset task_status as Unassigned", "id", sess.info.ID, "err", err)
}
return
}
@@ -351,17 +360,32 @@ func (m *Manager) CollectProofs(id string, sess *session) {
_ = participatingRollers[randIndex]
// TODO: reward winner
return
case ret := <-sess.finishChan:
m.mu.Lock()
sess.info.Rollers[ret.pk].Status = ret.status
m.mu.Unlock()
if m.isSessionFailed(sess.info) {
if err := m.orm.UpdateProvingStatus(ret.id, orm.ProvingTaskFailed); err != nil {
log.Error("failed to update proving_status as failed", "msg.ID", ret.id, "error", err)
}
}
if err := m.orm.SetSessionInfo(sess.info); err != nil {
log.Error("db set session info fail", "pk", ret.pk, "error", err)
}
m.mu.Unlock()
}
}
}
func (m *Manager) isSessionFailed(info *orm.SessionInfo) bool {
for _, roller := range info.Rollers {
if roller.Status != orm.RollerProofInvalid {
return false
}
}
return true
}
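Put together, the new `CollectProofs` takes the session itself, re-arms its timeout with `time.After` on every loop iteration, and uses the `isSessionFailed` helper above to fail the task only when every roller has submitted an invalid proof. A consolidated sketch (the winner selection and the no-valid-proof handling inside the timeout branch are abbreviated, as in the hunk):

```go
// CollectProofs collects proofs for a single proof generation session.
func (m *Manager) CollectProofs(sess *session) {
	for {
		select {
		case <-time.After(time.Duration(m.cfg.CollectionTime) * time.Minute):
			m.mu.Lock()
			defer func() {
				// Free every roller assigned to this session, then drop it.
				for pk := range sess.info.Rollers {
					m.freeTaskIDForRoller(pk, sess.info.ID)
				}
				delete(m.sessions, sess.info.ID)
				m.mu.Unlock()
			}()
			// ... if no valid proof arrived, mark the task failed;
			// otherwise pick a random participating roller as winner ...
			return
		case ret := <-sess.finishChan:
			m.mu.Lock()
			sess.info.Rollers[ret.pk].Status = ret.status
			if m.isSessionFailed(sess.info) {
				if err := m.orm.UpdateProvingStatus(ret.id, orm.ProvingTaskFailed); err != nil {
					log.Error("failed to update proving_status as failed", "msg.ID", ret.id, "error", err)
				}
			}
			// Persist per-roller statuses so a restarted coordinator can
			// restore the session (see restorePrevSessions above).
			if err := m.orm.SetSessionInfo(sess.info); err != nil {
				log.Error("db set session info fail", "pk", ret.pk, "error", err)
			}
			m.mu.Unlock()
		}
	}
}
```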
// APIs collect API services.
func (m *Manager) APIs() []rpc.API {
return []rpc.API{
@@ -380,12 +404,12 @@ func (m *Manager) APIs() []rpc.API {
// StartProofGenerationSession starts a proof generation session
func (m *Manager) StartProofGenerationSession(task *orm.BlockBatch) (success bool) {
roller := m.selectRoller()
if roller == nil {
if m.GetNumberOfIdleRollers() == 0 {
log.Warn("no idle roller when starting proof generation session", "id", task.ID)
return false
}
log.Info("start proof generation session", "id", task.ID)
log.Info("start proof generation session", "id", task.ID)
defer func() {
if !success {
if err := m.orm.UpdateProvingStatus(task.ID, orm.ProvingTaskUnassigned); err != nil {
@@ -393,70 +417,70 @@ func (m *Manager) StartProofGenerationSession(task *orm.BlockBatch) (success boo
}
}
}()
if err := m.orm.UpdateProvingStatus(task.ID, orm.ProvingTaskAssigned); err != nil {
// Get traces by batch id.
traces, err := m.orm.GetBlockTraces(map[string]interface{}{"batch_id": task.ID})
if err != nil {
log.Error("failed to get block traces", "batch_id", task.ID, "error", err)
return false
}
// Dispatch task to rollers.
rollers := make(map[string]*orm.RollerStatus)
for i := 0; i < int(m.cfg.RollersPerSession); i++ {
roller := m.selectRoller()
if roller == nil {
log.Info("selectRoller returns nil")
break
}
log.Info("roller is picked", "session id", task.ID, "name", roller.Name, "public key", roller.PublicKey)
// send trace to roller
if !roller.sendTask(task.ID, traces) {
log.Error("send task failed", "roller name", roller.Name, "public key", roller.PublicKey, "id", task.ID)
continue
}
rollers[roller.PublicKey] = &orm.RollerStatus{PublicKey: roller.PublicKey, Name: roller.Name, Status: orm.RollerAssigned}
}
// No roller assigned.
if len(rollers) == 0 {
log.Error("no roller assigned", "id", task.ID, "number of idle rollers", m.GetNumberOfIdleRollers())
return false
}
// Update session proving status as assigned.
if err = m.orm.UpdateProvingStatus(task.ID, orm.ProvingTaskAssigned); err != nil {
log.Error("failed to update task status", "id", task.ID, "err", err)
return false
}
blockInfos, err := m.orm.GetBlockInfos(map[string]interface{}{"batch_id": task.ID})
if err != nil {
log.Error(
"could not GetBlockInfos",
"batch_id", task.ID,
"error", err,
)
return false
}
traces := make([]*types.BlockTrace, len(blockInfos))
for i, blockInfo := range blockInfos {
traces[i], err = m.Client.GetBlockTraceByHash(m.ctx, common.HexToHash(blockInfo.Hash))
if err != nil {
log.Error(
"could not GetBlockTraceByNumber",
"block number", blockInfo.Number,
"block hash", blockInfo.Hash,
"error", err,
)
return false
}
}
log.Info("roller is picked", "name", roller.Name, "public_key", roller.PublicKey)
// send trace to roller
if !roller.sendTask(task.ID, traces) {
log.Error("send task failed", "roller name", roller.Name, "id", task.ID)
return false
}
pk := roller.PublicKey
// Create a proof generation session.
s := &session{
sess := &session{
info: &orm.SessionInfo{
ID: task.ID,
Rollers: map[string]*orm.RollerStatus{
pk: {
PublicKey: pk,
Name: roller.Name,
Status: orm.RollerAssigned,
},
},
ID: task.ID,
Rollers: rollers,
StartTimestamp: time.Now().Unix(),
},
finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
}
// Store session info.
if err = m.orm.SetSessionInfo(s.info); err != nil {
log.Error("db set session info fail", "pk", pk, "error", err)
if err = m.orm.SetSessionInfo(sess.info); err != nil {
log.Error("db set session info fail", "error", err)
for _, roller := range sess.info.Rollers {
log.Error(
"restore roller info for session",
"session id", sess.info.ID,
"roller name", roller.Name,
"public key", roller.PublicKey,
"proof status", roller.Status)
}
return false
}
m.mu.Lock()
m.sessions[task.ID] = s
m.sessions[task.ID] = sess
m.mu.Unlock()
go m.CollectProofs(task.ID, s)
go m.CollectProofs(sess)
return true
}
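The most heavily interleaved hunk is `StartProofGenerationSession`: it now pulls traces from the database and dispatches the same batch to up to `RollersPerSession` rollers instead of exactly one. A consolidated sketch of the post-change method (identifiers taken from the hunk; error logging and the deferred reset to `ProvingTaskUnassigned` on failure are trimmed):

```go
// StartProofGenerationSession assigns one batch to several rollers and
// starts a collection goroutine for the resulting session.
func (m *Manager) StartProofGenerationSession(task *orm.BlockBatch) bool {
	if m.GetNumberOfIdleRollers() == 0 {
		return false
	}
	// Traces now come from the database rather than an l2geth client.
	traces, err := m.orm.GetBlockTraces(map[string]interface{}{"batch_id": task.ID})
	if err != nil {
		return false
	}
	// Dispatch the same task to up to RollersPerSession idle rollers.
	rollers := make(map[string]*orm.RollerStatus)
	for i := 0; i < int(m.cfg.RollersPerSession); i++ {
		roller := m.selectRoller()
		if roller == nil {
			break
		}
		if !roller.sendTask(task.ID, traces) {
			continue
		}
		rollers[roller.PublicKey] = &orm.RollerStatus{
			PublicKey: roller.PublicKey, Name: roller.Name, Status: orm.RollerAssigned,
		}
	}
	if len(rollers) == 0 {
		return false
	}
	// Mark the batch as assigned only after at least one roller accepted it.
	if err = m.orm.UpdateProvingStatus(task.ID, orm.ProvingTaskAssigned); err != nil {
		return false
	}
	sess := &session{
		info: &orm.SessionInfo{
			ID:             task.ID,
			Rollers:        rollers,
			StartTimestamp: time.Now().Unix(),
		},
		finishChan: make(chan rollerProofStatus, proofAndPkBufferSize),
	}
	// Persist the session so it survives a coordinator restart.
	if err = m.orm.SetSessionInfo(sess.info); err != nil {
		return false
	}
	m.mu.Lock()
	m.sessions[task.ID] = sess
	m.mu.Unlock()
	go m.CollectProofs(sess)
	return true
}
```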


@@ -36,11 +36,13 @@ import (
)
var (
cfg *bridge_config.Config
dbImg docker.ImgInstance
dbImg docker.ImgInstance
redisImg docker.ImgInstance
cfg *bridge_config.Config
)
func randomUrl() string {
func randomURL() string {
id, _ := rand.Int(rand.Reader, big.NewInt(2000-1))
return fmt.Sprintf("localhost:%d", 10000+2000+id.Int64())
}
@@ -51,12 +53,25 @@ func setEnv(t *testing.T) (err error) {
assert.NoError(t, err)
// Create db container.
dbImg = docker.NewTestDBDocker(t, cfg.DBConfig.DriverName)
cfg.DBConfig.DSN = dbImg.Endpoint()
dbImg = docker.NewTestDBDocker(t, cfg.DBConfig.Persistence.DriverName)
cfg.DBConfig.Persistence.DSN = dbImg.Endpoint()
// Create redis container.
redisImg = docker.NewTestRedisDocker(t)
cfg.DBConfig.Redis.URL = redisImg.Endpoint()
return
}
func free(t *testing.T) {
if dbImg != nil {
assert.NoError(t, dbImg.Stop())
}
if redisImg != nil {
assert.NoError(t, redisImg.Stop())
}
}
func TestApis(t *testing.T) {
// Set up the test environment.
assert.True(t, assert.NoError(t, setEnv(t)), "failed to setup the test environment.")
@@ -64,6 +79,8 @@ func TestApis(t *testing.T) {
t.Run("TestHandshake", testHandshake)
t.Run("TestFailedHandshake", testFailedHandshake)
t.Run("TestSeveralConnections", testSeveralConnections)
t.Run("TestValidProof", testValidProof)
t.Run("TestInvalidProof", testInvalidProof)
t.Run("TestIdleRollerSelection", testIdleRollerSelection)
// TODO: Restart roller alone when received task, can add this test case in integration-test.
//t.Run("TestRollerReconnect", testRollerReconnect)
@@ -71,7 +88,7 @@ func TestApis(t *testing.T) {
// Teardown
t.Cleanup(func() {
dbImg.Stop()
free(t)
})
}
@@ -83,8 +100,8 @@ func testHandshake(t *testing.T) {
defer l2db.Close()
// Setup coordinator and ws server.
wsURL := "ws://" + randomUrl()
rollerManager, handler := setupCoordinator(t, cfg.DBConfig, wsURL)
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, cfg.DBConfig, 1, wsURL)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
@@ -104,8 +121,8 @@ func testFailedHandshake(t *testing.T) {
defer l2db.Close()
// Setup coordinator and ws server.
wsURL := "ws://" + randomUrl()
rollerManager, handler := setupCoordinator(t, cfg.DBConfig, wsURL)
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, cfg.DBConfig, 1, wsURL)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
@@ -127,7 +144,7 @@ func testFailedHandshake(t *testing.T) {
authMsg := &message.AuthMsg{
Identity: &message.Identity{
Name: name,
Timestamp: time.Now().UnixNano(),
Timestamp: uint32(time.Now().Unix()),
},
}
assert.NoError(t, authMsg.Sign(privkey))
@@ -145,7 +162,7 @@ func testFailedHandshake(t *testing.T) {
authMsg = &message.AuthMsg{
Identity: &message.Identity{
Name: name,
Timestamp: time.Now().UnixNano(),
Timestamp: uint32(time.Now().Unix()),
},
}
assert.NoError(t, authMsg.Sign(privkey))
@@ -170,8 +187,8 @@ func testSeveralConnections(t *testing.T) {
defer l2db.Close()
// Setup coordinator and ws server.
wsURL := "ws://" + randomUrl()
rollerManager, handler := setupCoordinator(t, cfg.DBConfig, wsURL)
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, cfg.DBConfig, 1, wsURL)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
@@ -215,6 +232,124 @@ func testSeveralConnections(t *testing.T) {
}
}
}
func testValidProof(t *testing.T) {
// Create db handler and reset db.
l2db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(l2db.GetDB().DB))
defer l2db.Close()
// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, cfg.DBConfig, 3, wsURL)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
}()
// create mock rollers.
rollers := make([]*mockRoller, 3)
for i := 0; i < len(rollers); i++ {
rollers[i] = newMockRoller(t, "roller_test"+strconv.Itoa(i), wsURL)
// only roller 0 submits valid proof.
rollers[i].waitTaskAndSendProof(t, time.Second, false, i == 0)
}
defer func() {
// close connection
for _, roller := range rollers {
roller.close()
}
}()
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers())
var ids = make([]string, 1)
dbTx, err := l2db.Beginx()
assert.NoError(t, err)
for i := range ids {
ID, err := l2db.NewBatchInDBTx(dbTx, &orm.BlockInfo{Number: uint64(i)}, &orm.BlockInfo{Number: uint64(i)}, "0f", 1, 194676)
assert.NoError(t, err)
ids[i] = ID
}
assert.NoError(t, dbTx.Commit())
// verify proof status
var (
tick = time.Tick(500 * time.Millisecond)
tickStop = time.Tick(10 * time.Second)
)
for len(ids) > 0 {
select {
case <-tick:
status, err := l2db.GetProvingStatusByID(ids[0])
assert.NoError(t, err)
if status == orm.ProvingTaskVerified {
ids = ids[1:]
}
case <-tickStop:
t.Error("failed to check proof status")
return
}
}
}
func testInvalidProof(t *testing.T) {
// Create db handler and reset db.
l2db, err := database.NewOrmFactory(cfg.DBConfig)
assert.NoError(t, err)
assert.NoError(t, migrate.ResetDB(l2db.GetDB().DB))
defer l2db.Close()
// Setup coordinator and ws server.
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, cfg.DBConfig, 3, wsURL)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
}()
// create mock rollers.
rollers := make([]*mockRoller, 3)
for i := 0; i < len(rollers); i++ {
rollers[i] = newMockRoller(t, "roller_test"+strconv.Itoa(i), wsURL)
rollers[i].waitTaskAndSendProof(t, time.Second, false, false)
}
defer func() {
// close connection
for _, roller := range rollers {
roller.close()
}
}()
assert.Equal(t, 3, rollerManager.GetNumberOfIdleRollers())
var ids = make([]string, 1)
dbTx, err := l2db.Beginx()
assert.NoError(t, err)
for i := range ids {
ID, err := l2db.NewBatchInDBTx(dbTx, &orm.BlockInfo{Number: uint64(i)}, &orm.BlockInfo{Number: uint64(i)}, "0f", 1, 194676)
assert.NoError(t, err)
ids[i] = ID
}
assert.NoError(t, dbTx.Commit())
// verify proof status
var (
tick = time.Tick(500 * time.Millisecond)
tickStop = time.Tick(10 * time.Second)
)
for len(ids) > 0 {
select {
case <-tick:
status, err := l2db.GetProvingStatusByID(ids[0])
assert.NoError(t, err)
if status == orm.ProvingTaskFailed {
ids = ids[1:]
}
case <-tickStop:
t.Error("failed to check proof status")
return
}
}
}
func testIdleRollerSelection(t *testing.T) {
// Create db handler and reset db.
@@ -224,8 +359,8 @@ func testIdleRollerSelection(t *testing.T) {
defer l2db.Close()
// Setup coordinator and ws server.
wsURL := "ws://" + randomUrl()
rollerManager, handler := setupCoordinator(t, cfg.DBConfig, wsURL)
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, cfg.DBConfig, 1, wsURL)
defer func() {
handler.Shutdown(context.Background())
rollerManager.Stop()
@@ -235,7 +370,7 @@ func testIdleRollerSelection(t *testing.T) {
rollers := make([]*mockRoller, 20)
for i := 0; i < len(rollers); i++ {
rollers[i] = newMockRoller(t, "roller_test"+strconv.Itoa(i), wsURL)
rollers[i].waitTaskAndSendProof(t, time.Second, false)
rollers[i].waitTaskAndSendProof(t, time.Second, false, true)
}
defer func() {
// close connection
@@ -293,13 +428,13 @@ func testGracefulRestart(t *testing.T) {
assert.NoError(t, dbTx.Commit())
// Setup coordinator and ws server.
wsURL := "ws://" + randomUrl()
rollerManager, handler := setupCoordinator(t, cfg.DBConfig, wsURL)
wsURL := "ws://" + randomURL()
rollerManager, handler := setupCoordinator(t, cfg.DBConfig, 1, wsURL)
// create mock roller
roller := newMockRoller(t, "roller_test", wsURL)
// wait 10 seconds, coordinator restarts before roller submits proof
roller.waitTaskAndSendProof(t, 10*time.Second, false)
roller.waitTaskAndSendProof(t, 10*time.Second, false, true)
// wait for coordinator to dispatch task
<-time.After(5 * time.Second)
@@ -311,7 +446,7 @@ func testGracefulRestart(t *testing.T) {
rollerManager.Stop()
// Setup new coordinator and ws server.
newRollerManager, newHandler := setupCoordinator(t, cfg.DBConfig, wsURL)
newRollerManager, newHandler := setupCoordinator(t, cfg.DBConfig, 1, wsURL)
defer func() {
newHandler.Shutdown(context.Background())
newRollerManager.Stop()
@@ -329,7 +464,7 @@ func testGracefulRestart(t *testing.T) {
}
// will overwrite the roller client for `SubmitProof`
roller.waitTaskAndSendProof(t, time.Millisecond*500, true)
roller.waitTaskAndSendProof(t, time.Millisecond*500, true, true)
defer roller.close()
// verify proof status
@@ -355,17 +490,17 @@ func testGracefulRestart(t *testing.T) {
}
}
func setupCoordinator(t *testing.T, dbCfg *database.DBConfig, wsURL string) (rollerManager *coordinator.Manager, handler *http.Server) {
func setupCoordinator(t *testing.T, dbCfg *database.DBConfig, rollersPerSession uint8, wsURL string) (rollerManager *coordinator.Manager, handler *http.Server) {
// Get db handler.
db, err := database.NewOrmFactory(dbCfg)
assert.True(t, assert.NoError(t, err), "failed to get db handler.")
rollerManager, err = coordinator.New(context.Background(), &coordinator_config.RollerManagerConfig{
RollersPerSession: 1,
RollersPerSession: rollersPerSession,
Verifier: &coordinator_config.VerifierConfig{MockMode: true},
CollectionTime: 1,
TokenTimeToLive: 5,
}, db, nil)
}, db)
assert.NoError(t, err)
assert.NoError(t, rollerManager.Start())
@@ -419,7 +554,7 @@ func (r *mockRoller) connectToCoordinator() (*client2.Client, ethereum.Subscript
authMsg := &message.AuthMsg{
Identity: &message.Identity{
Name: r.rollerName,
Timestamp: time.Now().UnixNano(),
Timestamp: uint32(time.Now().Unix()),
},
}
_ = authMsg.Sign(r.privKey)
@@ -448,7 +583,7 @@ func (r *mockRoller) releaseTasks() {
}
// Wait for the proof task, after receiving the proof task, roller submits proof after proofTime secs.
func (r *mockRoller) waitTaskAndSendProof(t *testing.T, proofTime time.Duration, reconnect bool) {
func (r *mockRoller) waitTaskAndSendProof(t *testing.T, proofTime time.Duration, reconnect bool, validProof bool) {
// simulating the case that the roller first disconnects and then reconnects to the coordinator
// the Subscription and its `Err()` channel will be closed, and the coordinator will `freeRoller()`
if reconnect {
@@ -464,10 +599,10 @@ func (r *mockRoller) waitTaskAndSendProof(t *testing.T, proofTime time.Duration,
r.releaseTasks()
r.stopCh = make(chan struct{})
go r.loop(t, r.client, proofTime, r.stopCh)
go r.loop(t, r.client, proofTime, validProof, r.stopCh)
}
func (r *mockRoller) loop(t *testing.T, client *client2.Client, proofTime time.Duration, stopCh chan struct{}) {
func (r *mockRoller) loop(t *testing.T, client *client2.Client, proofTime time.Duration, validProof bool, stopCh chan struct{}) {
for {
select {
case task := <-r.taskCh:
@@ -485,6 +620,9 @@ func (r *mockRoller) loop(t *testing.T, client *client2.Client, proofTime time.D
Proof: &message.AggProof{},
},
}
if !validProof {
proof.Status = message.StatusProofError
}
assert.NoError(t, proof.Sign(r.privKey))
ok, err := client.SubmitProof(context.Background(), proof)
assert.NoError(t, err)


@@ -41,7 +41,7 @@ func (r *rollerNode) sendTask(id string, traces []*types.BlockTrace) bool {
}:
r.TaskIDs.Set(id, struct{}{})
default:
log.Warn("roller channel is full")
log.Warn("roller channel is full", "roller name", r.Name, "public key", r.PublicKey)
return false
}
return true
@@ -77,6 +77,7 @@ func (m *Manager) register(pubkey string, identity *message.Identity) (<-chan *m
roller := node.(*rollerNode)
// avoid reconnection too frequently.
if time.Since(roller.registerTime) < 60 {
log.Warn("roller reconnect too frequently", "roller_name", identity.Name, "public key", pubkey)
return nil, fmt.Errorf("roller reconnect too frequently")
}
// update register time and status
@@ -105,17 +106,18 @@ func (m *Manager) freeTaskIDForRoller(pk string, id string) {
}
// GetNumberOfIdleRollers return the count of idle rollers.
func (m *Manager) GetNumberOfIdleRollers() int {
pubkeys := m.rollerPool.Keys()
for i := 0; i < len(pubkeys); i++ {
if val, ok := m.rollerPool.Get(pubkeys[i]); ok {
func (m *Manager) GetNumberOfIdleRollers() (count int) {
for i, pk := range m.rollerPool.Keys() {
if val, ok := m.rollerPool.Get(pk); ok {
r := val.(*rollerNode)
if r.TaskIDs.Count() > 0 {
pubkeys[i], pubkeys = pubkeys[len(pubkeys)-1], pubkeys[:len(pubkeys)-1]
if r.TaskIDs.Count() == 0 {
count++
}
} else {
log.Error("rollerPool Get fail", "pk", pk, "idx", i, "pk len", pk)
}
}
return len(pubkeys)
return count
}
func (m *Manager) selectRoller() *rollerNode {
@@ -127,6 +129,8 @@ func (m *Manager) selectRoller() *rollerNode {
if r.TaskIDs.Count() == 0 {
return r
}
} else {
log.Error("rollerPool Get fail", "pk", pubkeys[idx.Int64()], "idx", idx.Int64(), "pk len", len(pubkeys))
}
pubkeys[idx.Int64()], pubkeys = pubkeys[0], pubkeys[1:]
}
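The idle-roller counter above no longer prunes busy rollers out of the key slice while iterating it; consolidated, the new version simply counts rollers with an empty task set and logs keys that disappear between Keys() and Get():

```go
// GetNumberOfIdleRollers returns the number of connected rollers that
// currently have no task assigned (consolidated from the hunk above).
func (m *Manager) GetNumberOfIdleRollers() (count int) {
	for i, pk := range m.rollerPool.Keys() {
		if val, ok := m.rollerPool.Get(pk); ok {
			if val.(*rollerNode).TaskIDs.Count() == 0 {
				count++
			}
		} else {
			// A key returned by Keys() but missing on Get() indicates a
			// concurrent removal; it is only logged, not counted.
			log.Error("rollerPool Get fail", "pk", pk, "idx", i)
		}
	}
	return count
}
```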

database/cache/fastcache.go (new vendored file, 139 lines)

@@ -0,0 +1,139 @@
package cache
import (
"context"
"encoding/json"
"fmt"
"github.com/VictoriaMetrics/fastcache"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"math/big"
"sync"
"time"
)
var (
defFastConfig = &FastConfig{
CacheSize: 64 * 1024 * 1024,
TraceExpireSec: 3600,
}
)
type FastConfig struct {
CacheSize int
TraceExpireSec int64
}
type FastCache struct {
// trace expire time second.
traceExpireSec int64
// fast cache handler.
cache *fastcache.Cache
// Handle all the key and it's insert time.
expireTime sync.Map
}
func NewFastCache(ctx context.Context, cfg *FastConfig) Cache {
if cfg == nil {
cfg = defFastConfig
}
cache := &FastCache{
cache: fastcache.New(cfg.CacheSize),
traceExpireSec: cfg.TraceExpireSec,
}
go cache.loop(ctx)
return cache
}
func (f *FastCache) loop(ctx context.Context) {
tick := time.NewTicker(time.Second * 10)
for {
select {
case <-ctx.Done():
// Free cache.
f.cache.Reset()
return
case <-tick.C:
f.expireTime.Range(func(key, value any) bool {
number := key.(*big.Int)
if f.isExpired(number) {
// Delete key in expire map.
f.freeTrace(number)
}
return true
})
}
}
}
func (f *FastCache) ExistTrace(ctx context.Context, number *big.Int) (bool, error) {
// The trace exists only if the number index is present and not yet expired.
if !f.cache.Has(number.Bytes()) {
return false, nil
}
if f.isExpired(number) {
f.freeTrace(number)
return false, nil
}
return true, nil
}
func (f *FastCache) GetBlockTrace(ctx context.Context, hash common.Hash) (*types.BlockTrace, error) {
data := f.cache.Get(nil, hash.Bytes())
if data == nil {
return nil, fmt.Errorf("trace if not stored in fastcache, hash: %s", hash.String())
}
var trace = &types.BlockTrace{}
return trace, json.Unmarshal(data, &trace)
}
func (f *FastCache) SetBlockTrace(ctx context.Context, trace *types.BlockTrace) error {
hash := trace.Header.Hash()
number := trace.Header.Number
// If the trace is already cached and not yet expired, there is no need to reset it.
if f.cache.Has(number.Bytes()) && !f.isExpired(number) {
return nil
}
// Otherwise drop any stale fragment data before re-inserting.
f.freeTrace(number)
// Marshal the trace for storage.
data, err := json.Marshal(trace)
if err != nil {
return err
}
// Current timestamp in seconds.
curSec := time.Now().Unix()
// Set expire time.
f.expireTime.Store(number, curSec)
// Set index of number to hash.
f.cache.Set(number.Bytes(), hash.Bytes())
// Set trace content in memory cache.
f.cache.Set(hash.Bytes(), data)
return nil
}
// freeTrace removes the number index, the cached trace content and the expire-time entry.
func (f *FastCache) freeTrace(number *big.Int) {
val := f.cache.Get(nil, number.Bytes())
if val == nil {
return
}
// delete number index.
f.cache.Del(number.Bytes())
// delete trace content.
f.cache.Del(val)
// delete expire key.
f.expireTime.Delete(number)
}
func (f *FastCache) isExpired(key interface{}) bool {
curSec := time.Now().Unix()
val, ok := f.expireTime.Load(key)
if ok {
return curSec-val.(int64) > f.traceExpireSec
}
return false
}
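The new FastCache above layers a number-to-hash index and an expiry map on top of VictoriaMetrics' fastcache. A minimal, self-contained sketch of that storage pattern, using fastcache directly with illustrative keys rather than the wrapper itself:

package main

import (
    "fmt"
    "time"

    "github.com/VictoriaMetrics/fastcache"
)

func main() {
    c := fastcache.New(32 * 1024 * 1024) // 32 MiB cache
    expireAt := map[string]int64{}       // number key -> unix seconds of expiry

    number, hash, payload := []byte("1"), []byte("0xabc"), []byte(`{"trace":"..."}`)

    // Store the number -> hash index, the hash -> payload entry,
    // and remember when the entry should expire.
    c.Set(number, hash)
    c.Set(hash, payload)
    expireAt[string(number)] = time.Now().Unix() + 3600

    // Reads follow the same two hops: number -> hash -> payload.
    if h := c.Get(nil, number); h != nil {
        fmt.Printf("%s\n", c.Get(nil, h))
    }
}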

database/cache/fastcache_test.go (new file, 25 lines)

@@ -0,0 +1,25 @@
package cache_test
import (
"context"
"encoding/json"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert"
"os"
"scroll-tech/database/cache"
"testing"
)
func TestFastCache(t *testing.T) {
fdb := cache.NewFastCache(context.Background(), &cache.FastConfig{})
var (
data []byte
trace = &types.BlockTrace{}
)
data, err := os.ReadFile("../../common/testdata/blockTrace_02.json")
assert.NoError(t, err)
assert.NoError(t, json.Unmarshal(data, &trace))
assert.NoError(t, fdb.SetBlockTrace(context.Background(), trace))
}

database/cache/interface.go (new file, 16 lines)

@@ -0,0 +1,16 @@
package cache
import (
"context"
"math/big"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
)
// Cache is the common cache interface.
type Cache interface {
ExistTrace(ctx context.Context, number *big.Int) (bool, error)
GetBlockTrace(ctx context.Context, hash common.Hash) (*types.BlockTrace, error)
SetBlockTrace(ctx context.Context, trace *types.BlockTrace) error
}
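Both the fastcache backend above and the redis backend below return this same Cache interface, so callers can stay backend-agnostic. A small hedged sketch, assuming the scroll-tech/database/cache package path from this diff:

package example

import (
    "context"
    "math/big"

    "scroll-tech/database/cache"
)

// hasTrace works against either backend, since NewFastCache and
// NewRedisClientWrapper both return the same cache.Cache interface.
func hasTrace(ctx context.Context, c cache.Cache, number *big.Int) (bool, error) {
    return c.ExistTrace(ctx, number)
}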

database/cache/redis.go (new file, 104 lines)

@@ -0,0 +1,104 @@
package cache
import (
"context"
"encoding/json"
"math/big"
"time"
"github.com/scroll-tech/go-ethereum/common"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/redis/go-redis/v9"
)
// RedisConfig redis cache config.
type RedisConfig struct {
URL string `json:"url"`
Mode string `json:"mode,omitempty"`
Expirations map[string]int64 `json:"expirations,omitempty"`
}
// RedisClientWrapper wraps a redis client and the configured expirations.
type RedisClientWrapper struct {
client redisClient
traceExpire time.Duration
}
// redisClient wrap around single-redis-node / redis-cluster
type redisClient interface {
Exists(context.Context, ...string) *redis.IntCmd
Set(context.Context, string, interface{}, time.Duration) *redis.StatusCmd
Get(context.Context, string) *redis.StringCmd
}
// NewRedisClientWrapper creates a redis client wrapper that implements the Cache interface.
func NewRedisClientWrapper(redisConfig *RedisConfig) (Cache, error) {
var traceExpire = time.Second * 60
if val, exist := redisConfig.Expirations["trace"]; exist {
traceExpire = time.Duration(val) * time.Second
}
if redisConfig.Mode == "cluster" {
op, err := redis.ParseClusterURL(redisConfig.URL)
if err != nil {
return nil, err
}
return &RedisClientWrapper{
client: redis.NewClusterClient(op),
traceExpire: traceExpire,
}, nil
}
op, err := redis.ParseURL(redisConfig.URL)
if err != nil {
return nil, err
}
return &RedisClientWrapper{
client: redis.NewClient(op),
traceExpire: traceExpire,
}, nil
}
// ExistTrace checks whether the trace exists.
func (r *RedisClientWrapper) ExistTrace(ctx context.Context, number *big.Int) (bool, error) {
n, err := r.client.Exists(ctx, number.String()).Result()
return err == nil && n > 0, err
}
// SetBlockTrace stores the trace in redis.
func (r *RedisClientWrapper) SetBlockTrace(ctx context.Context, trace *types.BlockTrace) (setErr error) {
hash, number := trace.Header.Hash().String(), trace.Header.Number.String()
// Return early on error or if the trace already exists.
n, err := r.client.Exists(ctx, hash).Result()
if err != nil || n > 0 {
return err
}
// After a successful write, also index number -> hash with the same expiration.
defer func() {
if setErr == nil {
r.client.Set(ctx, number, hash, r.traceExpire)
}
}()
var data []byte
data, setErr = json.Marshal(trace)
if setErr != nil {
return setErr
}
return r.client.Set(ctx, hash, data, r.traceExpire).Err()
}
// GetBlockTrace gets the block trace by hash.
func (r *RedisClientWrapper) GetBlockTrace(ctx context.Context, hash common.Hash) (*types.BlockTrace, error) {
// Get trace content.
data, err := r.client.Get(ctx, hash.String()).Bytes()
if err != nil {
return nil, err
}
// Unmarshal trace and return result.
var trace types.BlockTrace
return &trace, json.Unmarshal(data, &trace)
}
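A usage sketch of the redis-backed Cache, assuming a reachable redis at the illustrative URL below; cluster deployments would set Mode to "cluster" and pass a cluster URL instead:

package main

import (
    "context"
    "fmt"
    "math/big"

    "scroll-tech/database/cache"
)

func main() {
    rdb, err := cache.NewRedisClientWrapper(&cache.RedisConfig{
        URL:         "redis://default:@localhost:6379/0", // illustrative endpoint
        Expirations: map[string]int64{"trace": 3600},     // trace TTL in seconds
    })
    if err != nil {
        panic(err)
    }

    // SetBlockTrace stores both number -> hash and hash -> JSON(trace)
    // with the configured TTL; ExistTrace keys on the block number.
    exist, err := rdb.ExistTrace(context.Background(), big.NewInt(1))
    fmt.Println(exist, err)
}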

database/cache/redis_test.go (new file, 46 lines)

@@ -0,0 +1,46 @@
package cache_test
import (
"context"
"encoding/json"
"os"
"testing"
"time"
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/stretchr/testify/assert"
"scroll-tech/common/docker"
"scroll-tech/database/cache"
)
func TestRedisCache(t *testing.T) {
redisImg := docker.NewTestRedisDocker(t)
defer redisImg.Stop()
rdb, err := cache.NewRedisClientWrapper(&cache.RedisConfig{
URL: redisImg.Endpoint(),
Expirations: map[string]int64{
"trace": 3600,
},
})
assert.NoError(t, err)
var (
data []byte
trace = &types.BlockTrace{}
)
data, err = os.ReadFile("../../common/testdata/blockTrace_02.json")
assert.NoError(t, err)
assert.NoError(t, json.Unmarshal(data, &trace))
assert.NoError(t, rdb.SetBlockTrace(context.Background(), trace))
exist, err := rdb.ExistTrace(context.Background(), trace.Header.Number)
assert.NoError(t, err)
assert.Equal(t, true, exist)
}
func TestNewRedisClientWrapper(t *testing.T) {
t.Log(time.Now().Unix())
}

View File

@@ -15,5 +15,5 @@ func TestRunDatabase(t *testing.T) {
// wait result
bridge.ExpectWithTimeout(true, time.Second*3, fmt.Sprintf("db_cli version %s", version.Version))
bridge.RunApp(false)
bridge.RunApp(nil)
}

View File

@@ -20,8 +20,9 @@ func getConfig(ctx *cli.Context) (*database.DBConfig, error) {
return dbCfg, nil
}
func initDB(dbCfg *database.DBConfig) (*sqlx.DB, error) {
factory, err := database.NewOrmFactory(dbCfg)
func initDB(cfg *database.DBConfig) (*sqlx.DB, error) {
dbCfg := cfg.Persistence
factory, err := database.NewOrmFactory(cfg)
if err != nil {
return nil, err
}

View File

@@ -4,10 +4,12 @@ import (
"encoding/json"
"os"
"path/filepath"
"scroll-tech/database/cache"
)
// DBConfig db config
type DBConfig struct {
// PersistenceConfig persistence db config.
type PersistenceConfig struct {
// data source name
DSN string `json:"dsn"`
DriverName string `json:"driver_name"`
@@ -16,6 +18,12 @@ type DBConfig struct {
MaxIdleNum int `json:"maxIdleNum" default:"20"`
}
// DBConfig db config
type DBConfig struct {
Persistence *PersistenceConfig `json:"persistence"`
Redis *cache.RedisConfig `json:"redis"`
}
// NewConfig returns a new instance of Config.
func NewConfig(file string) (*DBConfig, error) {
buf, err := os.ReadFile(filepath.Clean(file))

View File

@@ -1,4 +1,12 @@
{
"dsn": "postgres://postgres:123456@localhost:5444/test?sslmode=disable",
"driver_name": "postgres"
"persistence": {
"driver_name": "postgres",
"dsn": "postgres://admin:123456@localhost/test?sslmode=disable"
},
"redis": {
"url": "redis://default:@localhost:6379/0",
"expirations": {
"trace": 3600
}
}
}
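A sketch of consuming the reshaped config, assuming the JSON above is saved at an illustrative ./config.json path and the scroll-tech/database package path from this diff:

package main

import (
    "fmt"

    "scroll-tech/database"
)

func main() {
    cfg, err := database.NewConfig("./config.json") // illustrative path
    if err != nil {
        panic(err)
    }
    // Persistence carries the sql settings, Redis the cache settings.
    fmt.Println(cfg.Persistence.DriverName, cfg.Persistence.DSN)
    fmt.Println(cfg.Redis.URL, cfg.Redis.Expirations["trace"])
}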

View File

@@ -7,18 +7,22 @@ require (
github.com/lib/pq v1.10.6
github.com/mattn/go-sqlite3 v1.14.14
github.com/pressly/goose/v3 v3.7.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257
github.com/stretchr/testify v1.8.0
github.com/redis/go-redis/v9 v9.0.0-rc.4
github.com/scroll-tech/go-ethereum v1.10.14-0.20230113082126-cdaea939622e
github.com/stretchr/testify v1.8.1
github.com/urfave/cli/v2 v2.10.2
)
require (
github.com/VictoriaMetrics/fastcache v1.12.0 // indirect
github.com/btcsuite/btcd v0.20.1-beta // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
github.com/ethereum/go-ethereum v1.10.26 // indirect
github.com/go-stack/stack v1.8.0 // indirect
github.com/golang/snappy v0.0.4 // indirect
github.com/iden3/go-iden3-crypto v0.0.13 // indirect
github.com/kr/pretty v0.3.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
@@ -28,8 +32,8 @@ require (
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/crypto v0.4.0 // indirect
golang.org/x/sys v0.3.0 // indirect
golang.org/x/crypto v0.5.0 // indirect
golang.org/x/sys v0.4.0 // indirect
golang.org/x/tools v0.3.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

View File

@@ -38,6 +38,8 @@ github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAE
github.com/StackExchange/wmi v0.0.0-20180116203802-5d049714c4a6/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
github.com/VictoriaMetrics/fastcache v1.6.0 h1:C/3Oi3EiBCqufydp1neRZkqcwmEiuRT9c3fqvvgKm5o=
github.com/VictoriaMetrics/fastcache v1.6.0/go.mod h1:0qHz5QP0GMX4pfmMA/zt5RgfNuXJrTP0zS7DqpHGGTw=
github.com/VictoriaMetrics/fastcache v1.12.0 h1:vnVi/y9yKDcD9akmc4NqAoqgQhJrOwUF+j9LTgn4QDE=
github.com/VictoriaMetrics/fastcache v1.12.0/go.mod h1:tjiYeEfYXCqacuvYw/7UoDIeJaNxq6132xHICNP77w8=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/ajstarks/svgo v0.0.0-20180226025133-644b8db467af/go.mod h1:K08gAheRH3/J6wwsYMMT4xOr94bZjxIelGM0+d/wbFw=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
@@ -70,10 +72,12 @@ github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46f
github.com/c-bata/go-prompt v0.2.2/go.mod h1:VzqtzE2ksDBcdln8G7mk2RX9QyGjH+OVqOCSiVIqS34=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
@@ -97,6 +101,8 @@ github.com/deepmap/oapi-codegen v1.6.0/go.mod h1:ryDa9AgbELGeB+YEXE1dR53yAjHwFvE
github.com/deepmap/oapi-codegen v1.8.2/go.mod h1:YLgSKSDv/bZQB7N4ws6luhozi3cEdRktEqrX88CvjIw=
github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ=
github.com/dgryski/go-bitstream v0.0.0-20180413035011-3522498ce2c8/go.mod h1:VMaSuZ+SZcx/wljOQKvp5srsbCiKDEb6K2wC4+PiBmQ=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
github.com/dgryski/go-sip13 v0.0.0-20181026042036-e10d5fee7954/go.mod h1:vAd38F8PWV+bWy6jNmig1y/TA+kYO4g3RSRF0IAv0no=
github.com/dlclark/regexp2 v1.4.1-0.20201116162257-a2a8dda75c91/go.mod h1:2pZnwuY/m+8K6iRw6wQdMtk+rH5tNGR1i55kozfMjCc=
github.com/docker/docker v1.4.2-0.20180625184442-8e610b2b55bf/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
@@ -113,6 +119,7 @@ github.com/fatih/color v1.7.0/go.mod h1:Zm6kSWBoL9eyXnKyktHP6abPY2pDugNf5Kwzbycv
github.com/fjl/memsize v0.0.0-20190710130421-bcb5799ab5e5/go.mod h1:VvhXpOYNQvB+uIk2RvXzuaQtkQJzzIx6lSBe1xv7hi0=
github.com/fogleman/gg v1.2.1-0.20190220221249-0403632d5b90/go.mod h1:R/bRT+9gY/C5z7JzPU0zXsXHKM4/ayA+zqcVNZzPa1k=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/fsnotify/fsnotify v1.4.9 h1:hsms1Qyu0jgnwNXIxa+/V/PDsU6CfLf6CNO8H7IWoS4=
github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ=
github.com/gballet/go-libpcsclite v0.0.0-20190607065134-2772fd86a8ff/go.mod h1:x7DCsMOv1taUwEWCzT4cmDeAkigA5/QCwUodaVOe8Ww=
github.com/getkin/kin-openapi v0.53.0/go.mod h1:7Yn5whZr5kJi6t+kShccXS8ae1APpYTW6yheSwk8Yi4=
@@ -172,6 +179,7 @@ github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.4.1/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9 h1:O2Tfq5qg4qc4AmwVlvv0oLiVAGB7enBSJ2x2DqQFi38=
github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/pprof v0.0.0-20181206194817-3ea8567a2e57/go.mod h1:zfwlbNMJ+OItoe0UupaVj+oy1omPYYDuagoSzA8v9mc=
@@ -288,6 +296,7 @@ github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRW
github.com/naoina/go-stringutil v0.1.0/go.mod h1:XJ2SJL9jCtBh+P9q5btrd/Ylo8XwT/h1USek5+NqSA0=
github.com/naoina/toml v0.1.2-0.20170918210437-9fafd6967416/go.mod h1:NBIhNtsFMo3G2szEBne+bO4gS192HuIYRqfvOWb4i1E=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8 h1:nPr65rt6Y5JFSKQO7qToXr7pePgD6Gwiw05lkbyAQTE=
github.com/oklog/ulid v1.3.1/go.mod h1:CirwcVhetQ6Lv90oh/F+FBtV6XMibvdAFo93nm5qn4U=
github.com/olekukonko/tablewriter v0.0.5 h1:P2Ga83D34wi1o9J6Wh1mRuqd4mF/x/lgBS7N7AbDhec=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
@@ -295,9 +304,11 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk=
github.com/onsi/ginkgo v1.14.0/go.mod h1:iSB4RoI2tjJc9BBv4NKIKWKya62Rps+oPG/Lv9klQyY=
github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE=
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.25.0 h1:Vw7br2PCDYijJHSfBOWhov+8cAnUf8MfMaIOV323l6Y=
github.com/opentracing/opentracing-go v1.0.2/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.0.3-0.20180606204148-bd9c31933947/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
@@ -328,6 +339,8 @@ github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/tsdb v0.7.1 h1:YZcsG11NqnK4czYLrWd9mpEuAJIHVQLwdrleYfszMAA=
github.com/prometheus/tsdb v0.7.1/go.mod h1:qhTCs0VvXwvX/y3TZrWD7rabWM+ijKTux40TwIPHuXU=
github.com/redis/go-redis/v9 v9.0.0-rc.4 h1:JUhsiZMTZknz3vn50zSVlkwcSeTGPd51lMO3IKUrWpY=
github.com/redis/go-redis/v9 v9.0.0-rc.4/go.mod h1:Vo3EsyWnicKnSKCA7HhgnvnyA74wOA69Cd2Meli5mmA=
github.com/remyoudompheng/bigfft v0.0.0-20200410134404-eec4a21b6bb0 h1:OdAsTTz6OkFY5QxjkYwrChwuRruF69c169dPK26NUlk=
github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52/go.mod h1:RDpi1RftBQPUCDRw6SmxeaREsAaRKnOclghuzp/WRzc=
github.com/rjeczalik/notify v0.9.1/go.mod h1:rKwnCoCGeuQnwBtTSPL9Dad03Vh2n40ePRrjvIXnJho=
@@ -339,8 +352,8 @@ github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257 h1:FjBC0Ww42WRoiB5EQFxoIEcJqoEUw2twdhN9nGkVCQA=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230113082126-cdaea939622e h1:TAqAeQiQI6b+TRyqyQ6qhizqY35LhqYe8lWhG0nNRGw=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230113082126-cdaea939622e/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/zktrie v0.3.0/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
github.com/scroll-tech/zktrie v0.3.1 h1:HlR+fMBdjXX1/7cUMqpUgGEhGy/3vN1JpwQ0ovg/Ys8=
github.com/scroll-tech/zktrie v0.3.1/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
@@ -362,6 +375,7 @@ github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZL
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
@@ -369,8 +383,9 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
@@ -412,8 +427,8 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8=
golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80=
golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -468,6 +483,7 @@ golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v
golang.org/x/net v0.0.0-20210805182204-aaa1db679c0d/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.5.0 h1:GyT4nK/YDHSqa1c4753ouYCDajOYKTja9Xb/OHtgvSw=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -524,10 +540,11 @@ golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210816183151-1e6c022a8912/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220405052023-b1e9470b6e64/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -539,6 +556,7 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.6.0 h1:3XmdazWV+ubf7QgHSTWeykHOci5oeekaGJBLkrkaw4k=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -628,6 +646,7 @@ gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
gopkg.in/natefinch/npipe.v2 v2.0.0-20160621034901-c1b8fa8bdcce/go.mod h1:5AcXVHNjg+BDxry382+8OKon8SEWiKktQR07RKPsv1c=
gopkg.in/olebedev/go-duktape.v3 v3.0.0-20200619000410-60c24ae608a6/go.mod h1:uAJfkITjFhyEEuUfm7bsmCZRbW5WRq8s9EY8HZ6hCns=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ=
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/urfave/cli.v1 v1.20.0/go.mod h1:vuBzUtMdQeixQj8LVd+/98pzhxNGQoyuPBlsXHOQNO0=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=

View File

@@ -10,21 +10,30 @@ import (
"scroll-tech/common/docker"
"scroll-tech/database"
"scroll-tech/database/cache"
)
var (
pgDB *sqlx.DB
dbImg docker.ImgInstance
pgDB *sqlx.DB
dbImg docker.ImgInstance
redisImg docker.ImgInstance
)
func initEnv(t *testing.T) error {
// Start db container.
dbImg = docker.NewTestDBDocker(t, "postgres")
redisImg = docker.NewTestRedisDocker(t)
// Create db orm handler.
factory, err := database.NewOrmFactory(&database.DBConfig{
DriverName: "postgres",
DSN: dbImg.Endpoint(),
Persistence: &database.PersistenceConfig{
DriverName: "postgres",
DSN: dbImg.Endpoint(),
},
Redis: &cache.RedisConfig{
Expirations: map[string]int64{"trace": 30},
URL: redisImg.Endpoint(),
},
})
if err != nil {
return err
@@ -48,6 +57,9 @@ func TestMigrate(t *testing.T) {
if dbImg != nil {
assert.NoError(t, dbImg.Stop())
}
if redisImg != nil {
assert.NoError(t, redisImg.Stop())
}
})
}

View File

@@ -3,6 +3,7 @@
-- TODO: use foreign key for batch_id?
-- TODO: why tx_num is bigint?
-- TODO: trace content is stored in cache, this field is empty and can be removed later.
create table block_trace
(
number BIGINT NOT NULL,

View File

@@ -20,7 +20,7 @@ create table l1_message
);
comment
on column l1_message.status is 'undefined, pending, submitted, confirmed';
on column l1_message.status is 'undefined, pending, submitted, confirmed, failed, expired';
create unique index l1_message_hash_uindex
on l1_message (msg_hash);

View File

@@ -21,7 +21,7 @@ create table l2_message
);
comment
on column l2_message.status is 'undefined, pending, submitted, confirmed';
on column l2_message.status is 'undefined, pending, submitted, confirmed, failed, expired';
create unique index l2_message_hash_uindex
on l2_message (msg_hash);

View File

@@ -14,6 +14,8 @@ import (
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/common/utils"
"scroll-tech/database/cache"
)
// ProvingStatus block_batch proving_status (unassigned, assigned, proved, verified, submitted)
@@ -101,14 +103,25 @@ type BlockBatch struct {
}
type blockBatchOrm struct {
db *sqlx.DB
db *sqlx.DB
cache cache.Cache
}
var _ BlockBatchOrm = (*blockBatchOrm)(nil)
// NewBlockBatchOrm creates a blockBatchOrm instance
func NewBlockBatchOrm(db *sqlx.DB) BlockBatchOrm {
return &blockBatchOrm{db: db}
func NewBlockBatchOrm(db *sqlx.DB, cache cache.Cache) BlockBatchOrm {
return &blockBatchOrm{db: db, cache: cache}
}
// GetBatchIDByIndex get batch id by index.
func (o *blockBatchOrm) GetBatchIDByIndex(index uint64) (string, error) {
row := o.db.QueryRowx(`SELECT id FROM public.block_batch WHERE index = $1;`, index)
var id string
if err := row.Scan(&id); err != nil {
return "", err
}
return id, nil
}
func (o *blockBatchOrm) GetBlockBatches(fields map[string]interface{}, args ...string) ([]*BlockBatch, error) {
@@ -159,11 +172,11 @@ func (o *blockBatchOrm) GetVerifiedProofAndInstanceByID(id string) ([]byte, []by
return proof, instance, nil
}
func (o *blockBatchOrm) UpdateProofByID(ctx context.Context, id string, proof, instance_commitments []byte, proofTimeSec uint64) error {
func (o *blockBatchOrm) UpdateProofByID(ctx context.Context, id string, proof, instanceCommitments []byte, proofTimeSec uint64) error {
db := o.db
if _, err := db.ExecContext(ctx,
db.Rebind(`UPDATE block_batch set proof = ?, instance_commitments = ?, proof_time_sec = ? where id = ?;`),
proof, instance_commitments, proofTimeSec, id,
proof, instanceCommitments, proofTimeSec, id,
); err != nil {
log.Error("failed to update proof", "err", err)
}
@@ -215,8 +228,8 @@ func (o *blockBatchOrm) NewBatchInDBTx(dbTx *sqlx.Tx, startBlock *BlockInfo, end
"total_tx_num": totalTxNum,
"total_l2_gas": totalL2Gas,
"created_at": time.Now(),
// "proving_status": ProvingTaskUnassigned, // actually no need, because we have default value in DB schema
// "rollup_status": RollupPending, // actually no need, because we have default value in DB schema
// "proving_status": ProvingTaskUnassigned, // actually no need, because we have default value in Persistence schema
// "rollup_status": RollupPending, // actually no need, because we have default value in Persistence schema
}); err != nil {
return "", err
}
@@ -236,8 +249,8 @@ func (o *blockBatchOrm) BatchRecordExist(id string) (bool, error) {
return true, nil
}
func (o *blockBatchOrm) GetPendingBatches() ([]string, error) {
rows, err := o.db.Queryx(`SELECT id FROM block_batch WHERE rollup_status = $1 ORDER BY index ASC`, RollupPending)
func (o *blockBatchOrm) GetPendingBatches(limit uint64) ([]string, error) {
rows, err := o.db.Queryx(`SELECT id FROM block_batch WHERE rollup_status = $1 ORDER BY index ASC LIMIT $2`, RollupPending, limit)
if err != nil {
return nil, err
}
@@ -260,7 +273,7 @@ func (o *blockBatchOrm) GetPendingBatches() ([]string, error) {
}
func (o *blockBatchOrm) GetLatestFinalizedBatch() (*BlockBatch, error) {
row := o.db.QueryRowx(`SELECT * FROM block_batch WHERE rollup_status = $1 OR rollup_status = $2 ORDER BY index DESC;`, RollupFinalized, RollupFinalizationSkipped)
row := o.db.QueryRowx(`select * from block_batch where index = (select max(index) from block_batch where rollup_status = $1);`, RollupFinalized)
batch := &BlockBatch{}
if err := row.StructScan(batch); err != nil {
return nil, err
@@ -268,8 +281,8 @@ func (o *blockBatchOrm) GetLatestFinalizedBatch() (*BlockBatch, error) {
return batch, nil
}
func (o *blockBatchOrm) GetCommittedBatches() ([]string, error) {
rows, err := o.db.Queryx(`SELECT id FROM block_batch WHERE rollup_status = $1 ORDER BY index ASC`, RollupCommitted)
func (o *blockBatchOrm) GetCommittedBatches(limit uint64) ([]string, error) {
rows, err := o.db.Queryx(`SELECT id FROM block_batch WHERE rollup_status = $1 ORDER BY index ASC LIMIT $2`, RollupCommitted, limit)
if err != nil {
return nil, err
}
@@ -305,7 +318,7 @@ func (o *blockBatchOrm) GetRollupStatusByIDList(ids []string) ([]RollupStatus, e
return make([]RollupStatus, 0), nil
}
query, args, err := sqlx.In("SELECT rollup_status FROM block_batch WHERE id IN (?);", ids)
query, args, err := sqlx.In("SELECT id, rollup_status FROM block_batch WHERE id IN (?);", ids)
if err != nil {
return make([]RollupStatus, 0), err
}
@@ -314,17 +327,24 @@ func (o *blockBatchOrm) GetRollupStatusByIDList(ids []string) ([]RollupStatus, e
rows, err := o.db.Query(query, args...)
var statuses []RollupStatus
statusMap := make(map[string]RollupStatus)
for rows.Next() {
var id string
var status RollupStatus
if err = rows.Scan(&status); err != nil {
if err = rows.Scan(&id, &status); err != nil {
break
}
statuses = append(statuses, status)
statusMap[id] = status
}
var statuses []RollupStatus
if err != nil {
return statuses, err
}
for _, id := range ids {
statuses = append(statuses, statusMap[id])
}
return statuses, nil
}
@@ -360,24 +380,24 @@ func (o *blockBatchOrm) UpdateRollupStatus(ctx context.Context, id string, statu
}
}
func (o *blockBatchOrm) UpdateCommitTxHashAndRollupStatus(ctx context.Context, id string, commit_tx_hash string, status RollupStatus) error {
func (o *blockBatchOrm) UpdateCommitTxHashAndRollupStatus(ctx context.Context, id string, commitTxHash string, status RollupStatus) error {
switch status {
case RollupCommitted:
_, err := o.db.Exec(o.db.Rebind("update block_batch set commit_tx_hash = ?, rollup_status = ?, committed_at = ? where id = ?;"), commit_tx_hash, status, time.Now(), id)
_, err := o.db.Exec(o.db.Rebind("update block_batch set commit_tx_hash = ?, rollup_status = ?, committed_at = ? where id = ?;"), commitTxHash, status, time.Now(), id)
return err
default:
_, err := o.db.Exec(o.db.Rebind("update block_batch set commit_tx_hash = ?, rollup_status = ? where id = ?;"), commit_tx_hash, status, id)
_, err := o.db.Exec(o.db.Rebind("update block_batch set commit_tx_hash = ?, rollup_status = ? where id = ?;"), commitTxHash, status, id)
return err
}
}
func (o *blockBatchOrm) UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, id string, finalize_tx_hash string, status RollupStatus) error {
func (o *blockBatchOrm) UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, id string, finalizeTxHash string, status RollupStatus) error {
switch status {
case RollupFinalized:
_, err := o.db.Exec(o.db.Rebind("update block_batch set finalize_tx_hash = ?, rollup_status = ?, finalized_at = ? where id = ?;"), finalize_tx_hash, status, time.Now(), id)
_, err := o.db.Exec(o.db.Rebind("update block_batch set finalize_tx_hash = ?, rollup_status = ?, finalized_at = ? where id = ?;"), finalizeTxHash, status, time.Now(), id)
return err
default:
_, err := o.db.Exec(o.db.Rebind("update block_batch set finalize_tx_hash = ?, rollup_status = ? where id = ?;"), finalize_tx_hash, status, id)
_, err := o.db.Exec(o.db.Rebind("update block_batch set finalize_tx_hash = ?, rollup_status = ? where id = ?;"), finalizeTxHash, status, id)
return err
}
}
@@ -399,3 +419,17 @@ func (o *blockBatchOrm) GetAssignedBatchIDs() ([]string, error) {
return ids, rows.Close()
}
func (o *blockBatchOrm) UpdateSkippedBatches() (int64, error) {
res, err := o.db.Exec(o.db.Rebind("update block_batch set rollup_status = ? where (proving_status = ? or proving_status = ?) and rollup_status = ?;"), RollupFinalizationSkipped, ProvingTaskSkipped, ProvingTaskFailed, RollupCommitted)
if err != nil {
return 0, err
}
count, err := res.RowsAffected()
if err != nil {
return 0, err
}
return count, nil
}
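A short hedged sketch of the new limit-taking getters and the order-preserving status lookup shown earlier in this file, assuming a BlockBatchOrm built with NewBlockBatchOrm(db, cache) as elsewhere in this diff:

package example

import (
    "fmt"

    "scroll-tech/database/orm"
)

// showBatchHelpers fetches at most 10 pending batch ids and looks up their
// rollup statuses; statuses[i] now lines up with ids[i], duplicates included.
func showBatchHelpers(ormBatch orm.BlockBatchOrm) error {
    ids, err := ormBatch.GetPendingBatches(10)
    if err != nil {
        return err
    }
    statuses, err := ormBatch.GetRollupStatusByIDList(ids)
    if err != nil {
        return err
    }
    for i, id := range ids {
        fmt.Println(id, statuses[i])
    }
    return nil
}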

View File

@@ -1,8 +1,8 @@
package orm
import (
"context"
"database/sql"
"encoding/json"
"errors"
"fmt"
"strings"
@@ -12,18 +12,24 @@ import (
"github.com/scroll-tech/go-ethereum/core/types"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/database/cache"
"scroll-tech/common/utils"
)
type blockTraceOrm struct {
db *sqlx.DB
db *sqlx.DB
cache cache.Cache
}
var _ BlockTraceOrm = (*blockTraceOrm)(nil)
// NewBlockTraceOrm creates a blockTraceOrm instance
func NewBlockTraceOrm(db *sqlx.DB) BlockTraceOrm {
return &blockTraceOrm{db: db}
func NewBlockTraceOrm(db *sqlx.DB, cache cache.Cache) BlockTraceOrm {
return &blockTraceOrm{
db: db,
cache: cache,
}
}
func (o *blockTraceOrm) Exist(number uint64) (bool, error) {
@@ -49,11 +55,7 @@ func (o *blockTraceOrm) GetBlockTracesLatestHeight() (int64, error) {
}
func (o *blockTraceOrm) GetBlockTraces(fields map[string]interface{}, args ...string) ([]*types.BlockTrace, error) {
type Result struct {
Trace string
}
query := "SELECT trace FROM block_trace WHERE 1 = 1 "
query := "SELECT hash FROM block_trace WHERE 1 = 1 "
for key := range fields {
query += fmt.Sprintf("AND %s=:%s ", key, key)
}
@@ -65,18 +67,24 @@ func (o *blockTraceOrm) GetBlockTraces(fields map[string]interface{}, args ...st
return nil, err
}
var traces []*types.BlockTrace
var (
traces []*types.BlockTrace
rdb = o.cache
)
for rows.Next() {
result := &Result{}
if err = rows.StructScan(result); err != nil {
var (
trace *types.BlockTrace
hash string
)
if err = rows.Scan(&hash); err != nil {
break
}
trace := types.BlockTrace{}
err = json.Unmarshal([]byte(result.Trace), &trace)
trace, err = rdb.GetBlockTrace(context.Background(), common.HexToHash(hash))
if err != nil {
break
return nil, err
}
traces = append(traces, &trace)
traces = append(traces, trace)
}
if err != nil && !errors.Is(err, sql.ErrNoRows) {
return nil, err
@@ -153,31 +161,30 @@ func (o *blockTraceOrm) GetHashByNumber(number uint64) (*common.Hash, error) {
func (o *blockTraceOrm) InsertBlockTraces(blockTraces []*types.BlockTrace) error {
traceMaps := make([]map[string]interface{}, len(blockTraces))
rdb := o.cache
for i, trace := range blockTraces {
number, hash, tx_num, mtime := trace.Header.Number.Int64(),
number, hash, txNum, mtime := trace.Header.Number.Int64(),
trace.Header.Hash().String(),
len(trace.Transactions),
trace.Header.Time
gasCost := utils.ComputeTraceGasCost(trace)
// clear the `StructLogs` to reduce storage cost
for _, executionResult := range trace.ExecutionResults {
executionResult.StructLogs = nil
}
data, err := json.Marshal(trace)
if err != nil {
log.Error("failed to marshal blockTrace", "hash", hash, "err", err)
return err
}
traceMaps[i] = map[string]interface{}{
"number": number,
"hash": hash,
"parent_hash": trace.Header.ParentHash.String(),
"trace": string(data),
"tx_num": tx_num,
"number": number,
"hash": hash,
"parent_hash": trace.Header.ParentHash.String(),
// Empty json
"trace": "{}",
"tx_num": txNum,
"gas_used": gasCost,
"block_timestamp": mtime,
}
if rdb != nil {
err := rdb.SetBlockTrace(context.Background(), trace)
if err != nil {
return err
}
}
}
_, err := o.db.NamedExec(`INSERT INTO public.block_trace (number, hash, parent_hash, trace, tx_num, gas_used, block_timestamp) VALUES (:number, :hash, :parent_hash, :trace, :tx_num, :gas_used, :block_timestamp);`, traceMaps)
if err != nil {
@@ -186,8 +193,8 @@ func (o *blockTraceOrm) InsertBlockTraces(blockTraces []*types.BlockTrace) error
return err
}
func (o *blockTraceOrm) DeleteTracesByBatchID(batch_id string) error {
if _, err := o.db.Exec(o.db.Rebind("update block_trace set trace = ? where batch_id = ?;"), "{}", batch_id); err != nil {
func (o *blockTraceOrm) DeleteTracesByBatchID(batchID string) error {
if _, err := o.db.Exec(o.db.Rebind("update block_trace set trace = ? where batch_id = ?;"), "{}", batchID); err != nil {
return err
}
return nil
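With this change the block_trace table keeps only the hash index (the trace column is written as "{}") and trace bodies are served from the cache, so reads go DB -> hash -> cache. A hedged sketch against the BlockTraceOrm interface, assuming a factory wired up as in the tests further down:

package example

import (
    "github.com/scroll-tech/go-ethereum/core/types"

    "scroll-tech/database/orm"
)

// tracesByNumber sketches the new read path: the SQL only selects hashes
// from block_trace, and each trace body is then loaded from the cache.
func tracesByNumber(ormBlock orm.BlockTraceOrm, number uint64) ([]*types.BlockTrace, error) {
    return ormBlock.GetBlockTraces(map[string]interface{}{"number": number})
}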

View File

@@ -3,6 +3,7 @@ package orm
import (
"context"
"database/sql"
"fmt"
"github.com/jmoiron/sqlx"
"github.com/scroll-tech/go-ethereum/common"
@@ -27,6 +28,9 @@ const (
// MsgFailed represents the from_layer message status is failed
MsgFailed
// MsgExpired represents the from_layer message status is expired
MsgExpired
)
// L1Message is structure of stored layer1 bridge message
@@ -84,6 +88,19 @@ const (
RollerProofInvalid
)
func (s RollerProveStatus) String() string {
switch s {
case RollerAssigned:
return "RollerAssigned"
case RollerProofValid:
return "RollerProofValid"
case RollerProofInvalid:
return "RollerProofInvalid"
default:
return fmt.Sprintf("Bad Value: %d", int32(s))
}
}
// RollerStatus is the roller name and roller prove status
type RollerStatus struct {
PublicKey string `json:"public_key"`
@@ -104,7 +121,7 @@ type BlockTraceOrm interface {
GetBlockTracesLatestHeight() (int64, error)
GetBlockTraces(fields map[string]interface{}, args ...string) ([]*types.BlockTrace, error)
GetBlockInfos(fields map[string]interface{}, args ...string) ([]*BlockInfo, error)
// add `GetUnbatchedBlocks` because `GetBlockInfos` cannot support query "batch_id is NULL"
// GetUnbatchedBlocks is added because `GetBlockInfos` cannot support the query "batch_id is NULL"
GetUnbatchedBlocks(fields map[string]interface{}, args ...string) ([]*BlockInfo, error)
GetHashByNumber(number uint64) (*common.Hash, error)
DeleteTracesByBatchID(batchID string) error
@@ -120,38 +137,43 @@ type SessionInfoOrm interface {
// BlockBatchOrm block_batch operation interface
type BlockBatchOrm interface {
GetBatchIDByIndex(index uint64) (string, error)
GetBlockBatches(fields map[string]interface{}, args ...string) ([]*BlockBatch, error)
GetProvingStatusByID(id string) (ProvingStatus, error)
GetVerifiedProofAndInstanceByID(id string) ([]byte, []byte, error)
UpdateProofByID(ctx context.Context, id string, proof, instance_commitments []byte, proofTimeSec uint64) error
UpdateProofByID(ctx context.Context, id string, proof, instanceCommitments []byte, proofTimeSec uint64) error
UpdateProvingStatus(id string, status ProvingStatus) error
ResetProvingStatusFor(before ProvingStatus) error
NewBatchInDBTx(dbTx *sqlx.Tx, startBlock *BlockInfo, endBlock *BlockInfo, parentHash string, totalTxNum uint64, gasUsed uint64) (string, error)
BatchRecordExist(id string) (bool, error)
GetPendingBatches() ([]string, error)
GetCommittedBatches() ([]string, error)
GetPendingBatches(limit uint64) ([]string, error)
GetCommittedBatches(limit uint64) ([]string, error)
GetRollupStatus(id string) (RollupStatus, error)
GetRollupStatusByIDList(ids []string) ([]RollupStatus, error)
GetCommitTxHash(id string) (sql.NullString, error)
GetFinalizeTxHash(id string) (sql.NullString, error)
GetLatestFinalizedBatch() (*BlockBatch, error)
UpdateRollupStatus(ctx context.Context, id string, status RollupStatus) error
UpdateCommitTxHashAndRollupStatus(ctx context.Context, id string, commit_tx_hash string, status RollupStatus) error
UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, id string, finalize_tx_hash string, status RollupStatus) error
UpdateCommitTxHashAndRollupStatus(ctx context.Context, id string, commitTxHash string, status RollupStatus) error
UpdateFinalizeTxHashAndRollupStatus(ctx context.Context, id string, finalizeTxHash string, status RollupStatus) error
GetAssignedBatchIDs() ([]string, error)
UpdateSkippedBatches() (int64, error)
GetCommitTxHash(id string) (sql.NullString, error) // for unit tests only
GetFinalizeTxHash(id string) (sql.NullString, error) // for unit tests only
}
// L1MessageOrm is layer1 message db interface
type L1MessageOrm interface {
GetL1MessageByNonce(nonce uint64) (*L1Message, error)
GetL1MessageByMsgHash(msgHash string) (*L1Message, error)
GetL1MessagesByStatus(status MsgStatus) ([]*L1Message, error)
GetL1MessagesByStatus(status MsgStatus, limit uint64) ([]*L1Message, error)
GetL1ProcessedNonce() (int64, error)
SaveL1Messages(ctx context.Context, messages []*L1Message) error
UpdateLayer2Hash(ctx context.Context, msgHash string, layer2Hash string) error
UpdateLayer1Status(ctx context.Context, msgHash string, status MsgStatus) error
UpdateLayer1StatusAndLayer2Hash(ctx context.Context, msgHash string, status MsgStatus, layer2Hash string) error
GetLayer1LatestWatchedHeight() (int64, error)
GetRelayL1MessageTxHash(nonce uint64) (sql.NullString, error) // for unit tests only
}
// L2MessageOrm is layer2 message db interface
@@ -160,7 +182,7 @@ type L2MessageOrm interface {
GetL2MessageByMsgHash(msgHash string) (*L2Message, error)
MessageProofExist(nonce uint64) (bool, error)
GetMessageProofByNonce(nonce uint64) (string, error)
GetL2MessagesByStatus(status MsgStatus) ([]*L2Message, error)
GetL2Messages(fields map[string]interface{}, args ...string) ([]*L2Message, error)
GetL2ProcessedNonce() (int64, error)
SaveL2Messages(ctx context.Context, messages []*L2Message) error
UpdateLayer1Hash(ctx context.Context, msgHash string, layer1Hash string) error
@@ -168,4 +190,6 @@ type L2MessageOrm interface {
UpdateLayer2StatusAndLayer1Hash(ctx context.Context, msgHash string, status MsgStatus, layer1Hash string) error
UpdateMessageProof(ctx context.Context, nonce uint64, proof string) error
GetLayer2LatestWatchedHeight() (int64, error)
GetRelayL2MessageTxHash(nonce uint64) (sql.NullString, error) // for unit tests only
}

View File

@@ -7,17 +7,20 @@ import (
"github.com/jmoiron/sqlx"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/database/cache"
)
type l1MessageOrm struct {
db *sqlx.DB
db *sqlx.DB
cache cache.Cache
}
var _ L1MessageOrm = (*l1MessageOrm)(nil)
// NewL1MessageOrm create an L1MessageOrm instance
func NewL1MessageOrm(db *sqlx.DB) L1MessageOrm {
return &l1MessageOrm{db: db}
func NewL1MessageOrm(db *sqlx.DB, cache cache.Cache) L1MessageOrm {
return &l1MessageOrm{db: db, cache: cache}
}
// GetL1MessageByMsgHash fetch message by nonce
@@ -45,8 +48,8 @@ func (m *l1MessageOrm) GetL1MessageByNonce(nonce uint64) (*L1Message, error) {
}
// GetL1MessagesByStatus fetch list of unprocessed messages given msg status
func (m *l1MessageOrm) GetL1MessagesByStatus(status MsgStatus) ([]*L1Message, error) {
rows, err := m.db.Queryx(`SELECT nonce, msg_hash, height, sender, target, value, fee, gas_limit, deadline, calldata, layer1_hash, status FROM l1_message WHERE status = $1 ORDER BY nonce ASC;`, status)
func (m *l1MessageOrm) GetL1MessagesByStatus(status MsgStatus, limit uint64) ([]*L1Message, error) {
rows, err := m.db.Queryx(`SELECT nonce, msg_hash, height, sender, target, value, fee, gas_limit, deadline, calldata, layer1_hash, status FROM l1_message WHERE status = $1 ORDER BY nonce ASC LIMIT $2;`, status, limit)
if err != nil {
return nil, err
}
@@ -167,3 +170,12 @@ func (m *l1MessageOrm) GetLayer1LatestWatchedHeight() (int64, error) {
}
return -1, nil
}
func (m *l1MessageOrm) GetRelayL1MessageTxHash(nonce uint64) (sql.NullString, error) {
row := m.db.QueryRow(`SELECT layer2_hash FROM l1_message WHERE nonce = $1`, nonce)
var hash sql.NullString
if err := row.Scan(&hash); err != nil {
return sql.NullString{}, err
}
return hash, nil
}
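A hedged sketch of the new GetL1MessagesByStatus signature, assuming an L1MessageOrm built with NewL1MessageOrm(db, cache) as above; the limit value is illustrative:

package example

import (
    "scroll-tech/database/orm"
)

// l1MessagesByStatus now passes an explicit limit instead of loading every
// message with the given status in one query.
func l1MessagesByStatus(ormLayer1 orm.L1MessageOrm, status orm.MsgStatus) ([]*orm.L1Message, error) {
    return ormLayer1.GetL1MessagesByStatus(status, 100) // at most 100 rows
}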

View File

@@ -5,20 +5,24 @@ import (
"database/sql"
"errors"
"fmt"
"strings"
"github.com/jmoiron/sqlx"
"github.com/scroll-tech/go-ethereum/log"
"scroll-tech/database/cache"
)
type layer2MessageOrm struct {
db *sqlx.DB
db *sqlx.DB
cache cache.Cache
}
var _ L2MessageOrm = (*layer2MessageOrm)(nil)
// NewL2MessageOrm create an L2MessageOrm instance
func NewL2MessageOrm(db *sqlx.DB) L2MessageOrm {
return &layer2MessageOrm{db: db}
func NewL2MessageOrm(db *sqlx.DB, cache cache.Cache) L2MessageOrm {
return &layer2MessageOrm{db: db, cache: cache}
}
// GetL2MessageByNonce fetch message by nonce
@@ -88,8 +92,15 @@ func (m *layer2MessageOrm) GetL2ProcessedNonce() (int64, error) {
}
// GetL2Messages fetches messages matching the given fields and args
func (m *layer2MessageOrm) GetL2MessagesByStatus(status MsgStatus) ([]*L2Message, error) {
rows, err := m.db.Queryx(`SELECT nonce, msg_hash, height, sender, target, value, fee, gas_limit, deadline, calldata, layer2_hash FROM l2_message WHERE status = $1 ORDER BY nonce ASC;`, status)
func (m *layer2MessageOrm) GetL2Messages(fields map[string]interface{}, args ...string) ([]*L2Message, error) {
query := "SELECT nonce, msg_hash, height, sender, target, value, fee, gas_limit, deadline, calldata, layer2_hash FROM l2_message WHERE 1 = 1 "
for key := range fields {
query += fmt.Sprintf("AND %s=:%s ", key, key)
}
query = strings.Join(append([]string{query}, args...), " ")
db := m.db
rows, err := db.NamedQuery(db.Rebind(query), fields)
if err != nil {
return nil, err
}
@@ -198,3 +209,12 @@ func (m *layer2MessageOrm) GetLayer2LatestWatchedHeight() (int64, error) {
}
return height, nil
}
func (m *layer2MessageOrm) GetRelayL2MessageTxHash(nonce uint64) (sql.NullString, error) {
row := m.db.QueryRow(`SELECT layer1_hash FROM l2_message WHERE nonce = $1`, nonce)
var hash sql.NullString
if err := row.Scan(&hash); err != nil {
return sql.NullString{}, err
}
return hash, nil
}
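A hedged sketch of GetL2Messages, which replaces GetL2MessagesByStatus: the status filter becomes one entry in the fields map and extra SQL clauses are passed as trailing args. The clause strings below are illustrative:

package example

import (
    "scroll-tech/database/orm"
)

// l2MessagesByStatus uses the generic fields/args form that replaces the
// old status-only query.
func l2MessagesByStatus(ormLayer2 orm.L2MessageOrm, status orm.MsgStatus) ([]*orm.L2Message, error) {
    return ormLayer2.GetL2Messages(
        map[string]interface{}{"status": status},
        "ORDER BY nonce ASC",
        "LIMIT 100",
    )
}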

View File

@@ -4,17 +4,20 @@ import (
"encoding/json"
"github.com/jmoiron/sqlx"
"scroll-tech/database/cache"
)
type sessionInfoOrm struct {
db *sqlx.DB
db *sqlx.DB
cache cache.Cache
}
var _ SessionInfoOrm = (*sessionInfoOrm)(nil)
// NewSessionInfoOrm creates a sessionInfoOrm instance
func NewSessionInfoOrm(db *sqlx.DB) SessionInfoOrm {
return &sessionInfoOrm{db: db}
func NewSessionInfoOrm(db *sqlx.DB, cache cache.Cache) SessionInfoOrm {
return &sessionInfoOrm{db: db, cache: cache}
}
func (o *sessionInfoOrm) GetSessionInfosByIDs(ids []string) ([]*SessionInfo, error) {

View File

@@ -1,9 +1,12 @@
package database
import (
"fmt"
"github.com/jmoiron/sqlx"
_ "github.com/lib/pq" //nolint:golint
"scroll-tech/database/cache"
"scroll-tech/database/orm"
)
@@ -14,6 +17,7 @@ type OrmFactory interface {
orm.L1MessageOrm
orm.L2MessageOrm
orm.SessionInfoOrm
cache.Cache
GetDB() *sqlx.DB
Beginx() (*sqlx.Tx, error)
Close() error
@@ -26,28 +30,41 @@ type ormFactory struct {
orm.L2MessageOrm
orm.SessionInfoOrm
*sqlx.DB
// cache interface.
cache.Cache
}
// NewOrmFactory creates an ormFactory that implements all of the orm interfaces
func NewOrmFactory(cfg *DBConfig) (OrmFactory, error) {
pCfg, rCfg := cfg.Persistence, cfg.Redis
if rCfg == nil {
return nil, fmt.Errorf("redis config is empty")
}
// Initialize sql/sqlx
db, err := sqlx.Open(cfg.DriverName, cfg.DSN)
db, err := sqlx.Open(pCfg.DriverName, pCfg.DSN)
if err != nil {
return nil, err
}
db.SetMaxIdleConns(cfg.MaxOpenNum)
db.SetMaxIdleConns(cfg.MaxIdleNum)
db.SetMaxOpenConns(pCfg.MaxOpenNum)
db.SetMaxIdleConns(pCfg.MaxIdleNum)
if err = db.Ping(); err != nil {
return nil, err
}
// Create redis client.
cacheOrm, err := cache.NewRedisClientWrapper(rCfg)
if err != nil {
return nil, err
}
return &ormFactory{
BlockTraceOrm: orm.NewBlockTraceOrm(db),
BlockBatchOrm: orm.NewBlockBatchOrm(db),
L1MessageOrm: orm.NewL1MessageOrm(db),
L2MessageOrm: orm.NewL2MessageOrm(db),
SessionInfoOrm: orm.NewSessionInfoOrm(db),
BlockTraceOrm: orm.NewBlockTraceOrm(db, cacheOrm),
BlockBatchOrm: orm.NewBlockBatchOrm(db, cacheOrm),
L1MessageOrm: orm.NewL1MessageOrm(db, cacheOrm),
L2MessageOrm: orm.NewL2MessageOrm(db, cacheOrm),
SessionInfoOrm: orm.NewSessionInfoOrm(db, cacheOrm),
Cache: cacheOrm,
DB: db,
}, nil
}
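Because the factory now embeds cache.Cache, the factory value itself satisfies the Cache interface. A hedged sketch, assuming the package paths from this diff:

package example

import (
    "context"

    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/core/types"

    "scroll-tech/database"
)

// traceFromFactory shows the effect of embedding cache.Cache in OrmFactory:
// the factory itself can serve trace lookups.
func traceFromFactory(factory database.OrmFactory, hash common.Hash) (*types.BlockTrace, error) {
    return factory.GetBlockTrace(context.Background(), hash)
}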

View File

@@ -15,6 +15,7 @@ import (
"scroll-tech/common/docker"
"scroll-tech/database"
"scroll-tech/database/cache"
"scroll-tech/database/migrate"
"scroll-tech/database/orm"
)
@@ -80,6 +81,7 @@ var (
dbConfig *database.DBConfig
dbImg docker.ImgInstance
redisImg docker.ImgInstance
ormBlock orm.BlockTraceOrm
ormLayer1 orm.L1MessageOrm
ormLayer2 orm.L2MessageOrm
@@ -89,9 +91,18 @@ var (
func setupEnv(t *testing.T) error {
// Init db config and start db container.
dbConfig = &database.DBConfig{DriverName: "postgres"}
dbImg = docker.NewTestDBDocker(t, dbConfig.DriverName)
dbConfig.DSN = dbImg.Endpoint()
dbImg = docker.NewTestDBDocker(t, "postgres")
redisImg = docker.NewTestRedisDocker(t)
dbConfig = &database.DBConfig{
Persistence: &database.PersistenceConfig{
DriverName: "postgres",
DSN: dbImg.Endpoint(),
},
Redis: &cache.RedisConfig{
URL: redisImg.Endpoint(),
Expirations: map[string]int64{"trace": 30},
},
}
// Create db handler and reset db.
factory, err := database.NewOrmFactory(dbConfig)
@@ -100,11 +111,11 @@ func setupEnv(t *testing.T) error {
assert.NoError(t, migrate.ResetDB(db.DB))
// Init several orm handles.
ormBlock = orm.NewBlockTraceOrm(db)
ormLayer1 = orm.NewL1MessageOrm(db)
ormLayer2 = orm.NewL2MessageOrm(db)
ormBatch = orm.NewBlockBatchOrm(db)
ormSession = orm.NewSessionInfoOrm(db)
ormBlock = orm.BlockTraceOrm(factory)
ormLayer1 = orm.L1MessageOrm(factory)
ormLayer2 = orm.L2MessageOrm(factory)
ormBatch = orm.BlockBatchOrm(factory)
ormSession = orm.SessionInfoOrm(factory)
templateBlockTrace, err := os.ReadFile("../common/testdata/blockTrace_03.json")
if err != nil {
@@ -115,16 +126,21 @@ func setupEnv(t *testing.T) error {
return json.Unmarshal(templateBlockTrace, blockTrace)
}
func freeDB(t *testing.T) {
if dbImg != nil {
assert.NoError(t, dbImg.Stop())
}
if redisImg != nil {
assert.NoError(t, redisImg.Stop())
}
}
// TestOrmFactory run several test cases.
func TestOrmFactory(t *testing.T) {
defer func() {
if dbImg != nil {
assert.NoError(t, dbImg.Stop())
}
}()
if err := setupEnv(t); err != nil {
t.Fatal(err)
}
defer freeDB(t)
t.Run("testOrmBlockTraces", testOrmBlockTraces)
@@ -281,7 +297,7 @@ func testOrmBlockBatch(t *testing.T) {
assert.NoError(t, err)
assert.Equal(t, int(2), len(batches))
batcheIDs, err := ormBatch.GetPendingBatches()
batcheIDs, err := ormBatch.GetPendingBatches(10)
assert.NoError(t, err)
assert.Equal(t, int(2), len(batcheIDs))
assert.Equal(t, batchID1, batcheIDs[0])
@@ -290,33 +306,51 @@ func testOrmBlockBatch(t *testing.T) {
err = ormBatch.UpdateCommitTxHashAndRollupStatus(context.Background(), batchID1, "commit_tx_1", orm.RollupCommitted)
assert.NoError(t, err)
batcheIDs, err = ormBatch.GetPendingBatches()
batcheIDs, err = ormBatch.GetPendingBatches(10)
assert.NoError(t, err)
assert.Equal(t, int(1), len(batcheIDs))
assert.Equal(t, batchID2, batcheIDs[0])
proving_status, err := ormBatch.GetProvingStatusByID(batchID1)
provingStatus, err := ormBatch.GetProvingStatusByID(batchID1)
assert.NoError(t, err)
assert.Equal(t, orm.ProvingTaskUnassigned, proving_status)
assert.Equal(t, orm.ProvingTaskUnassigned, provingStatus)
err = ormBatch.UpdateProofByID(context.Background(), batchID1, []byte{1}, []byte{2}, 1200)
assert.NoError(t, err)
err = ormBatch.UpdateProvingStatus(batchID1, orm.ProvingTaskVerified)
assert.NoError(t, err)
proving_status, err = ormBatch.GetProvingStatusByID(batchID1)
provingStatus, err = ormBatch.GetProvingStatusByID(batchID1)
assert.NoError(t, err)
assert.Equal(t, orm.ProvingTaskVerified, proving_status)
assert.Equal(t, orm.ProvingTaskVerified, provingStatus)
rollup_status, err := ormBatch.GetRollupStatus(batchID1)
rollupStatus, err := ormBatch.GetRollupStatus(batchID1)
assert.NoError(t, err)
assert.Equal(t, orm.RollupCommitted, rollup_status)
assert.Equal(t, orm.RollupCommitted, rollupStatus)
err = ormBatch.UpdateFinalizeTxHashAndRollupStatus(context.Background(), batchID1, "finalize_tx_1", orm.RollupFinalized)
assert.NoError(t, err)
rollup_status, err = ormBatch.GetRollupStatus(batchID1)
rollupStatus, err = ormBatch.GetRollupStatus(batchID1)
assert.NoError(t, err)
assert.Equal(t, orm.RollupFinalized, rollup_status)
assert.Equal(t, orm.RollupFinalized, rollupStatus)
result, err := ormBatch.GetLatestFinalizedBatch()
assert.NoError(t, err)
assert.Equal(t, batchID1, result.ID)
status1, err := ormBatch.GetRollupStatus(batchID1)
assert.NoError(t, err)
status2, err := ormBatch.GetRollupStatus(batchID2)
assert.NoError(t, err)
assert.NotEqual(t, status1, status2)
statues, err := ormBatch.GetRollupStatusByIDList([]string{batchID1, batchID2, batchID1, batchID2})
assert.NoError(t, err)
assert.Equal(t, statues[0], status1)
assert.Equal(t, statues[1], status2)
assert.Equal(t, statues[2], status1)
assert.Equal(t, statues[3], status2)
statues, err = ormBatch.GetRollupStatusByIDList([]string{batchID2, batchID1, batchID2, batchID1})
assert.NoError(t, err)
assert.Equal(t, statues[0], status2)
assert.Equal(t, statues[1], status1)
assert.Equal(t, statues[2], status2)
assert.Equal(t, statues[3], status1)
}
// testOrmSessionInfo test rollup result table functions
@@ -342,9 +376,9 @@ func testOrmSessionInfo(t *testing.T) {
ids, err := ormBatch.GetAssignedBatchIDs()
assert.NoError(t, err)
assert.Equal(t, 1, len(ids))
session_infos, err := ormSession.GetSessionInfosByIDs(ids)
sessionInfos, err := ormSession.GetSessionInfosByIDs(ids)
assert.NoError(t, err)
assert.Equal(t, 0, len(session_infos))
assert.Equal(t, 0, len(sessionInfos))
sessionInfo := orm.SessionInfo{
ID: batchID,
@@ -359,25 +393,25 @@ func testOrmSessionInfo(t *testing.T) {
// insert
assert.NoError(t, ormSession.SetSessionInfo(&sessionInfo))
session_infos, err = ormSession.GetSessionInfosByIDs(ids)
sessionInfos, err = ormSession.GetSessionInfosByIDs(ids)
assert.NoError(t, err)
assert.Equal(t, 1, len(session_infos))
assert.Equal(t, sessionInfo, *session_infos[0])
assert.Equal(t, 1, len(sessionInfos))
assert.Equal(t, sessionInfo, *sessionInfos[0])
// update
sessionInfo.Rollers["0"].Status = orm.RollerProofValid
assert.NoError(t, ormSession.SetSessionInfo(&sessionInfo))
session_infos, err = ormSession.GetSessionInfosByIDs(ids)
sessionInfos, err = ormSession.GetSessionInfosByIDs(ids)
assert.NoError(t, err)
assert.Equal(t, 1, len(session_infos))
assert.Equal(t, sessionInfo, *session_infos[0])
assert.Equal(t, 1, len(sessionInfos))
assert.Equal(t, sessionInfo, *sessionInfos[0])
// delete
assert.NoError(t, ormBatch.UpdateProvingStatus(batchID, orm.ProvingTaskVerified))
ids, err = ormBatch.GetAssignedBatchIDs()
assert.NoError(t, err)
assert.Equal(t, 0, len(ids))
session_infos, err = ormSession.GetSessionInfosByIDs(ids)
sessionInfos, err = ormSession.GetSessionInfosByIDs(ids)
assert.NoError(t, err)
assert.Equal(t, 0, len(session_infos))
assert.Equal(t, 0, len(sessionInfos))
}

View File

@@ -119,7 +119,6 @@ github.com/go-logfmt/logfmt v0.5.1/go.mod h1:WYhtIu8zTZfxdn5+rREduYbwxfcBr/Vr6KE
github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
github.com/go-openapi/swag v0.19.5 h1:lTz6Ys4CmqqCQmZPBlbQENR1/GucA2bzYTE12Pw4tFY=
github.com/go-sourcemap/sourcemap v2.1.3+incompatible h1:W1iEw64niKVGogNgBN3ePyLFfuisuzeidWPMPWmECqU=
github.com/go-task/slim-sprig v0.0.0-20210107165309-348f09dbbbc0/go.mod h1:fyg7847qk6SyHyPtNmDHnmrv/HOrqktSC+C9fM+CJOE=
github.com/gofrs/uuid v3.3.0+incompatible h1:8K4tyRfvU1CYPgJsveYFQMhpFd/wXNM7iK6rR7UHz84=
github.com/golang-jwt/jwt/v4 v4.3.0 h1:kHL1vqdqWNfATmA0FNMdmZNMyZI1U6O31X4rlIPoBog=
github.com/golang-jwt/jwt/v4 v4.3.0/go.mod h1:/xlHOz8bRuivTWchD4jCa+NbatV+wEUSzwAxVc6locg=
@@ -130,17 +129,15 @@ github.com/golang/geo v0.0.0-20190916061304-5b978397cfec h1:lJwO/92dFXWeXOZdoGXg
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7 h1:5ZkaAPbicIKTF2I64qf5Fh8Aa83Q/dnOafMYV0OMwjA=
github.com/golang/mock v1.3.1 h1:qGJ6qTW+x6xX/my+8YUVl4WNpX9B7+/l2tRsHGZ7f2s=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/golangci/lint-1 v0.0.0-20181222135242-d2cdd8c08219 h1:utua3L2IbQJmauC5IXdEA547bcoU5dozgQAfc8Onsg4=
github.com/google/btree v1.0.0 h1:0udJVsspx3VBr5FwtLhQQtuAsVc79tTq0ocGIPAU6qo=
github.com/google/flatbuffers v1.11.0 h1:O7CEyB8Cb3/DmtxODGtLHcEvpr81Jm5qLg/hsHnxA2A=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.1.1-0.20200604201612-c04b05f3adfa h1:Q75Upo5UN4JbPFURXZ8nLKYUvF85dyFRop/vQ0Rv+64=
github.com/google/martian v2.1.0+incompatible h1:/CP5g8u/VJHijgedC/Legn3BAbAaWPgecwXBIDzw5no=
github.com/google/pprof v0.0.0-20191218002539-d4f498aebedc h1:DLpL8pWq0v4JYoRpEhDfsJhhJyGKCcQM2WPW2TJs31c=
github.com/google/pprof v0.0.0-20210407192527-94a9f03dee38/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0 h1:GOZbcHa3HfsPKPlmyPyN2KEohoMXOhdMbHrvbpl2QaA=
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510 h1:El6M4kTTCOh6aBiKaUGG7oYTSPP8MxqL4YI3kZKwcP4=
github.com/google/uuid v1.2.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
@@ -150,7 +147,6 @@ github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
github.com/huin/goutil v0.0.0-20170803182201-1ca381bf3150 h1:vlNjIqmUZ9CMAWsbURYl3a6wZbw7q5RHVvlXTNS/Bs8=
github.com/ianlancetaylor/demangle v0.0.0-20181102032728-5e5cf60278f6 h1:UDMh68UUwekSh5iP2OMhRRZJiiBccgV7axzUG8vi56c=
github.com/ianlancetaylor/demangle v0.0.0-20200824232613-28f6c0f3b639/go.mod h1:aSSvb/t6k1mPoxDqO4vJh6VOCGPwU4O0C2/Eqndh1Sc=
github.com/imdario/mergo v0.3.13 h1:lFzP57bqS/wsqKssCGmtLAb8A0wKjLGrve2q3PPVcBk=
github.com/inconshreveable/mousetrap v1.0.0 h1:Z8tu5sraLXCXIcARxBp/8cbvlwVa7Z1NHg9XEKhtSvM=
github.com/influxdata/flux v0.65.1 h1:77BcVUCzvN5HMm8+j9PRBQ4iZcu98Dl4Y9rf+J5vhnc=
@@ -223,6 +219,8 @@ github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNG
github.com/retailnext/hllpp v1.0.1-0.20180308014038-101a6d2f8b52 h1:RnWNS9Hlm8BIkjr6wx8li5abe0fr73jljLycdfemTp0=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221202061207-804e7edc23ba/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221213034543-78c1f57fcfea/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230112091133-2891916a0f81/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/segmentio/kafka-go v0.2.0 h1:HtCSf6B4gN/87yc5qTl7WsxPKQIIGXLPPM1bMCPOsoY=
github.com/sergi/go-diff v1.0.0 h1:Kpca3qRNrduNnOQeazBd0ysaKrUJiIuISHxogkT9RPQ=
github.com/shopspring/decimal v1.3.1 h1:2Usl1nmF/WZucqkFZhnfFYxxxu8LG21F6nPQBE5gKV8=
@@ -270,6 +268,7 @@ golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d h1:TzXSXBo42m9gQenoE3b9BG
golang.org/x/sys v0.2.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.3.0 h1:qoo4akIqOcDME5bhc/NgxUdovd6BSS2uMsVjB56q1xI=
golang.org/x/term v0.3.0/go.mod h1:q750SLmJuPmVoN1blW3UFBPREJfb1KmY3vwxfr+nFDA=
golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ=
golang.org/x/tools v0.1.8-0.20211029000441-d6a9af8af023/go.mod h1:nABZi5QlRsZVlzPpHl034qft6wpY4eDcsTt5AaioBiU=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
@@ -280,9 +279,7 @@ google.golang.org/api v0.15.0 h1:yzlyyDW/J0w8yNFJIhiAJy4kq74S+1DOLdawELNxFMA=
google.golang.org/appengine v1.6.5 h1:tycE03LOZYQNhDpS27tcQdAzLCVMaj7QT2SXxebnpCM=
google.golang.org/genproto v0.0.0-20200108215221-bd8f9a0ef82f h1:2wh8dWY8959cBGQvk1RD+/eQBgRYYDaZ+hT0/zsARoA=
google.golang.org/grpc v1.26.0 h1:2dTRdpdFEEhJYQD8EMLB61nnrzSCTbG38PhqdhvOltg=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc=
gopkg.in/errgo.v2 v2.1.0 h1:0vLT13EuvQ0hNvakwLuFZ/jYrLp5F3kcWHXdRggjCE8=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=

View File

@@ -4,20 +4,20 @@ IMAGE_NAME=go-roller
IMAGE_VERSION=latest
ifeq (4.3,$(firstword $(sort $(MAKE_VERSION) 4.3)))
ZK_VERSION=$(shell grep -m 1 "common-rs" ../common/libzkp/impl/Cargo.lock | cut -d "#" -f2 | cut -c-7)
ZK_VERSION=$(shell grep -m 1 "scroll-zkevm" ../common/libzkp/impl/Cargo.lock | cut -d "#" -f2 | cut -c-7)
else
ZK_VERSION=$(shell grep -m 1 "common-rs" ../common/libzkp/impl/Cargo.lock | cut -d "\#" -f2 | cut -c-7)
ZK_VERSION=$(shell grep -m 1 "scroll-zkevm" ../common/libzkp/impl/Cargo.lock | cut -d "\#" -f2 | cut -c-7)
endif
libzkp:
cd ../common/libzkp/impl && cargo build --release && cp ./target/release/libzkp.a ../interface/
cp -r ../common/libzkp/interface ./prover/lib
rm -rf ./prover/lib && cp -r ../common/libzkp/interface ./prover/lib
roller: libzkp ## Build the Roller instance.
GOBIN=$(PWD)/build/bin go build -ldflags "-X scroll-tech/common/version.ZK_VERSION=${ZK_VERSION}" -o $(PWD)/build/bin/roller ./cmd
GOBIN=$(PWD)/build/bin go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -o $(PWD)/build/bin/roller ./cmd
gpu-roller: libzkp ## Build the GPU Roller instance.
GOBIN=$(PWD)/build/bin go build -ldflags "-X scroll-tech/common/version.ZK_VERSION=${ZK_VERSION}" -tags gpu -o $(PWD)/build/bin/roller ./cmd
GOBIN=$(PWD)/build/bin go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}" -tags gpu -o $(PWD)/build/bin/roller ./cmd
mock_roller:
GOBIN=$(PWD)/build/bin go build -tags mock_prover -o $(PWD)/build/bin/roller $(PWD)/cmd
@@ -29,7 +29,7 @@ test-gpu-prover: libzkp
go test -tags="gpu ffi" -timeout 0 -v ./prover
lastest-zk-version:
curl -sL https://api.github.com/repos/scroll-tech/common-rs/commits | jq -r ".[0].sha"
curl -sL https://api.github.com/repos/scroll-tech/scroll-zkevm/commits | jq -r ".[0].sha"
lint: ## Lint the files - used for CI
GOBIN=$(PWD)/build/bin go run ../build/lint.go
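
The Makefile above injects the scroll-zkevm commit hash at link time via -ldflags "-X scroll-tech/common/version.ZkVersion=...". For -X to take effect, the target must be a plain package-level string variable. A minimal sketch of what such a version package could look like; only the Version and ZkVersion symbol names come from the diffs above, while the defaults and the FullVersion helper are assumptions:

```go
// Package version exposes build metadata that the Makefile overrides at link
// time, e.g.:
//   go build -ldflags "-X scroll-tech/common/version.ZkVersion=${ZK_VERSION}"
package version

import "fmt"

// Version is the semantic version printed by the roller on startup
// (illustrative default).
var Version = "v0.0.0"

// ZkVersion is the short commit hash of the scroll-zkevm dependency. It must
// stay a string variable (not a const) with a constant initializer so that
// the linker's -X flag can replace its value.
var ZkVersion = "unknown"

// FullVersion renders both values for logs and --version output.
func FullVersion() string {
	return fmt.Sprintf("%s (zk: %s)", Version, ZkVersion)
}
```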

View File

@@ -15,5 +15,5 @@ func TestRunRoller(t *testing.T) {
// wait result
roller.ExpectWithTimeout(true, time.Second*3, fmt.Sprintf("roller version %s", version.Version))
roller.RunApp(false)
roller.RunApp(nil)
}

View File

@@ -3,15 +3,15 @@ module scroll-tech/roller
go 1.18
require (
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257
github.com/stretchr/testify v1.8.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20230113082126-cdaea939622e
github.com/stretchr/testify v1.8.1
github.com/urfave/cli/v2 v2.10.2
go.etcd.io/bbolt v1.3.6
)
require (
github.com/btcsuite/btcd v0.20.1-beta // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cpuguy83/go-md2man/v2 v2.0.2 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/ethereum/go-ethereum v1.10.26 // indirect
@@ -25,7 +25,7 @@ require (
github.com/shirou/gopsutil v3.21.11+incompatible // indirect
github.com/xrash/smetrics v0.0.0-20201216005158-039620a65673 // indirect
github.com/yusufpapurcu/wmi v1.2.2 // indirect
golang.org/x/crypto v0.4.0 // indirect
golang.org/x/sys v0.3.0 // indirect
golang.org/x/crypto v0.5.0 // indirect
golang.org/x/sys v0.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

View File

@@ -72,8 +72,8 @@ github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA
github.com/cespare/cp v0.1.0/go.mod h1:SOGHArjBr4JWaSDEVpWpo/hNg6RoKrls6Oh40hiwW+s=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
@@ -323,8 +323,8 @@ github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/russross/blackfriday/v2 v2.1.0 h1:JIOH55/0cWyOuilr9/qlrm0BSXldqnqwMsf35Ld67mk=
github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257 h1:FjBC0Ww42WRoiB5EQFxoIEcJqoEUw2twdhN9nGkVCQA=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230113082126-cdaea939622e h1:TAqAeQiQI6b+TRyqyQ6qhizqY35LhqYe8lWhG0nNRGw=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230113082126-cdaea939622e/go.mod h1:jurIpDQ0hqtp9//xxeWzr8X9KMP/+TYn+vz3K1wZrv0=
github.com/scroll-tech/zktrie v0.3.0/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
github.com/scroll-tech/zktrie v0.3.1 h1:HlR+fMBdjXX1/7cUMqpUgGEhGy/3vN1JpwQ0ovg/Ys8=
github.com/scroll-tech/zktrie v0.3.1/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
@@ -346,6 +346,7 @@ github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4/go.mod h1:RZL
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/testify v1.2.0/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
@@ -353,8 +354,9 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7 h1:epCh84lMvA70Z7CTTCmYQn2CKbY8j86K7/FAIr141uY=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
@@ -398,8 +400,8 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8=
golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80=
golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
golang.org/x/crypto v0.5.0/go.mod h1:NK/OQwhpMQP3MwtdjgLlYHnH9ebylxKWv3e0fK+mkQU=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -512,8 +514,8 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
golang.org/x/sys v0.4.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=

View File

@@ -5,8 +5,8 @@ import (
"crypto/ecdsa"
"errors"
"fmt"
"math"
"sort"
"strings"
"sync/atomic"
"time"
@@ -29,8 +29,6 @@ import (
var (
// retry connecting to coordinator
retryWait = time.Second * 10
// net normal close
errNormalClose = errors.New("use of closed network connection")
)
// Roller contains websocket conn to coordinator, Stack, unix-socket to ipc-prover.
@@ -42,8 +40,9 @@ type Roller struct {
taskChan chan *message.TaskMsg
sub ethereum.Subscription
isClosed int64
stopChan chan struct{}
isDisconnected int64
isClosed int64
stopChan chan struct{}
priv *ecdsa.PrivateKey
}
@@ -106,10 +105,16 @@ func (r *Roller) Start() {
// Register registers Roller to the coordinator through Websocket.
func (r *Roller) Register() error {
timestamp := time.Now().Unix()
if timestamp < 0 || timestamp > math.MaxUint32 {
panic("Expected current time to be between the years 1970 and 2106")
}
authMsg := &message.AuthMsg{
Identity: &message.Identity{
Name: r.cfg.RollerName,
Timestamp: time.Now().UnixMilli(),
Timestamp: uint32(timestamp),
PublicKey: r.PublicKey(),
Version: version.Version,
},
@@ -122,12 +127,11 @@ func (r *Roller) Register() error {
token, err := r.client.RequestToken(context.Background(), authMsg)
if err != nil {
return fmt.Errorf("request token failed %v", err)
} else {
authMsg.Identity.Token = token
}
authMsg.Identity.Token = token
// Sign auth message
if err := authMsg.Sign(r.priv); err != nil {
if err = authMsg.Sign(r.priv); err != nil {
return fmt.Errorf("sign auth message failed %v", err)
}
@@ -159,6 +163,8 @@ func (r *Roller) HandleCoordinator() {
}
func (r *Roller) mustRetryCoordinator() {
atomic.StoreInt64(&r.isDisconnected, 1)
defer atomic.StoreInt64(&r.isDisconnected, 0)
for {
log.Info("retry to connect to coordinator...")
err := r.Register()
@@ -186,9 +192,6 @@ func (r *Roller) ProveLoop() {
time.Sleep(time.Second * 3)
continue
}
if strings.Contains(err.Error(), errNormalClose.Error()) {
return
}
log.Error("prove failed", "error", err)
}
}
@@ -196,73 +199,92 @@ func (r *Roller) ProveLoop() {
}
func (r *Roller) prove() error {
task, err := r.stack.Pop()
task, err := r.stack.Peek()
if err != nil {
return err
}
var proofMsg *message.ProofDetail
if task.Times > 2 {
proofMsg = &message.ProofDetail{
Status: message.StatusProofError,
Error: "prover has retried several times due to FFI panic",
ID: task.Task.ID,
Proof: &message.AggProof{},
if task.Times <= 2 {
// If panic times <= 2, try to proof the task.
if err = r.stack.UpdateTimes(task, task.Times+1); err != nil {
return err
}
_, err = r.signAndSubmitProof(proofMsg)
return err
}
// Sort BlockTraces by header number.
traces := task.Task.Traces
sort.Slice(traces, func(i, j int) bool {
return traces[i].Header.Number.Int64() < traces[j].Header.Number.Int64()
})
err = r.stack.Push(task)
if err != nil {
return err
}
log.Info("start to prove block", "task-id", task.Task.ID)
log.Info("start to prove block", "task-id", task.Task.ID)
// sort BlockTrace
traces := task.Task.Traces
sort.Slice(traces, func(i, j int) bool {
return traces[i].Header.Number.Int64() < traces[j].Header.Number.Int64()
})
proof, err := r.prover.Prove(traces)
if err != nil {
proofMsg = &message.ProofDetail{
Status: message.StatusProofError,
Error: err.Error(),
ID: task.Task.ID,
Proof: &message.AggProof{},
// If FFI panic during Prove, the roller will restart and re-enter prove() function,
// the proof will not be submitted.
var proof *message.AggProof
proof, err = r.prover.Prove(traces)
if err != nil {
proofMsg = &message.ProofDetail{
Status: message.StatusProofError,
Error: err.Error(),
ID: task.Task.ID,
Proof: &message.AggProof{},
}
log.Error("prove block failed!", "task-id", task.Task.ID)
} else {
proofMsg = &message.ProofDetail{
Status: message.StatusOk,
ID: task.Task.ID,
Proof: proof,
}
log.Info("prove block successfully!", "task-id", task.Task.ID)
}
log.Error("prove block failed!", "task-id", task.Task.ID)
} else {
// when the roller has more than 3 times panic,
// it will omit to prove the task, submit StatusProofError and then Pop the task.
proofMsg = &message.ProofDetail{
Status: message.StatusOk,
Status: message.StatusProofError,
Error: "zk proving panic",
ID: task.Task.ID,
Proof: proof,
Proof: &message.AggProof{},
}
log.Info("prove block successfully!", "task-id", task.Task.ID)
}
_, err = r.stack.Pop()
if err != nil {
return err
}
ok, err := r.signAndSubmitProof(proofMsg)
if !ok {
log.Error("submit proof to coordinator failed", "task ID", proofMsg.ID)
}
return err
defer func() {
_, err = r.stack.Pop()
if err != nil {
log.Error("roller stack pop failed!", "err", err)
}
}()
r.signAndSubmitProof(proofMsg)
return nil
}
func (r *Roller) signAndSubmitProof(msg *message.ProofDetail) (bool, error) {
func (r *Roller) signAndSubmitProof(msg *message.ProofDetail) {
authZkProof := &message.ProofMsg{ProofDetail: msg}
if err := authZkProof.Sign(r.priv); err != nil {
return false, err
log.Error("sign proof error", "err", err)
return
}
return r.client.SubmitProof(context.Background(), authZkProof)
// Retry SubmitProof several times.
for i := 0; i < 3; i++ {
// When the roller is disconnected from the coordinator,
// wait until the roller reconnects to the coordinator.
for atomic.LoadInt64(&r.isDisconnected) == 1 {
time.Sleep(retryWait)
}
ok, serr := r.client.SubmitProof(context.Background(), authZkProof)
if !ok {
log.Error("submit proof to coordinator failed", "task ID", msg.ID)
return
}
if serr == nil {
return
}
log.Error("submit proof to coordinator error", "task ID", msg.ID, "error", serr)
}
}
// Stop closes the websocket connection.
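
The reworked submission path above peeks the task, bumps its retry counter, and only pops it once the proof attempt has run, while signAndSubmitProof retries the upload and pauses whenever isDisconnected is raised by the reconnect loop. A standalone sketch of that submit-and-wait pattern; submitWithRetry and its parameters are illustrative names, not the repository's API:

```go
package main

import (
	"errors"
	"log"
	"sync/atomic"
	"time"
)

// submitWithRetry attempts submit up to maxAttempts times and, before each
// attempt, spins while the disconnected flag is raised (the flag would be set
// by the reconnect path and cleared once the connection is restored).
func submitWithRetry(disconnected *int64, retryWait time.Duration, maxAttempts int, submit func() error) error {
	for i := 0; i < maxAttempts; i++ {
		// Wait until the connection to the coordinator has been restored.
		for atomic.LoadInt64(disconnected) == 1 {
			time.Sleep(retryWait)
		}
		err := submit()
		if err == nil {
			return nil
		}
		log.Println("submit failed, retrying", "attempt", i+1, "err", err)
	}
	return errors.New("submit failed after all retries")
}

func main() {
	var disconnected int64 // 1 while reconnecting, 0 otherwise
	err := submitWithRetry(&disconnected, time.Second, 3, func() error {
		log.Println("submitting proof")
		return nil
	})
	if err != nil {
		log.Println("giving up:", err)
	}
}
```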

View File

@@ -58,6 +58,29 @@ func (s *Stack) Push(task *ProvingTask) error {
})
}
// Peek return the top element of the Stack.
func (s *Stack) Peek() (*ProvingTask, error) {
var value []byte
if err := s.View(func(tx *bbolt.Tx) error {
bu := tx.Bucket(bucket)
c := bu.Cursor()
_, value = c.Last()
return nil
}); err != nil {
return nil, err
}
if len(value) == 0 {
return nil, ErrEmpty
}
traces := &ProvingTask{}
err := json.Unmarshal(value, traces)
if err != nil {
return nil, err
}
return traces, nil
}
// Pop pops the proving-task on the top of Stack.
func (s *Stack) Pop() (*ProvingTask, error) {
var value []byte
@@ -79,6 +102,21 @@ func (s *Stack) Pop() (*ProvingTask, error) {
if err != nil {
return nil, err
}
task.Times++
return task, nil
}
// UpdateTimes udpates the roller prove times of the proving task.
func (s *Stack) UpdateTimes(task *ProvingTask, udpateTimes int) error {
task.Times = udpateTimes
byt, err := json.Marshal(task)
if err != nil {
return err
}
key := []byte(task.Task.ID)
return s.Update(func(tx *bbolt.Tx) error {
bu := tx.Bucket(bucket)
c := bu.Cursor()
key, _ = c.Last()
return bu.Put(key, byt)
})
}
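
Peek and UpdateTimes above both rely on positioning a bbolt cursor on the last key of the bucket, so the top of the stack can be read or rewritten without popping it. A self-contained sketch of the same cursor-based Peek against a toy task type; the bucket name, struct fields and file path are assumptions, only the go.etcd.io/bbolt usage pattern mirrors the diff:

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"

	bolt "go.etcd.io/bbolt"
)

var bucket = []byte("tasks")

// task is a toy stand-in for ProvingTask.
type task struct {
	ID    string `json:"id"`
	Times int    `json:"times"`
}

// peek returns the most recently pushed task without removing it: the cursor
// jumps to the last key in the bucket, as in the Peek added above.
func peek(db *bolt.DB) (*task, error) {
	t := &task{}
	found := false
	err := db.View(func(tx *bolt.Tx) error {
		b := tx.Bucket(bucket)
		if b == nil {
			return nil // bucket not created yet: empty stack
		}
		_, v := b.Cursor().Last()
		if len(v) == 0 {
			return nil
		}
		found = true
		// Decode inside the transaction while the value bytes are valid.
		return json.Unmarshal(v, t)
	})
	if err != nil {
		return nil, err
	}
	if !found {
		return nil, fmt.Errorf("stack is empty")
	}
	return t, nil
}

func main() {
	db, err := bolt.Open("stack.db", 0600, nil)
	if err != nil {
		log.Fatal(err)
	}
	defer db.Close()
	// Push one task so peek has something to return.
	err = db.Update(func(tx *bolt.Tx) error {
		b, err := tx.CreateBucketIfNotExists(bucket)
		if err != nil {
			return err
		}
		byt, _ := json.Marshal(&task{ID: "task-1", Times: 0})
		return b.Put([]byte("task-1"), byt)
	})
	if err != nil {
		log.Fatal(err)
	}
	top, err := peek(db)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("top of stack:", top.ID, top.Times)
}
```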

View File

@@ -57,7 +57,13 @@ func TestStack(t *testing.T) {
err = s.Push(pop)
assert.NoError(t, err)
peek, err := s.Peek()
assert.NoError(t, err)
pop2, err := s.Pop()
assert.NoError(t, err)
assert.Equal(t, 2, pop2.Times)
assert.Equal(t, peek, pop2)
s.UpdateTimes(pop2, 1)
assert.NoError(t, err)
assert.Equal(t, 1, pop2.Times)
}

View File

@@ -36,6 +36,7 @@ var (
l1gethImg docker.ImgInstance
l2gethImg docker.ImgInstance
dbImg docker.ImgInstance
redisImg docker.ImgInstance
timestamp int
wsPort int64
@@ -53,6 +54,7 @@ func setupEnv(t *testing.T) {
l1gethImg = docker.NewTestL1Docker(t)
l2gethImg = docker.NewTestL2Docker(t)
dbImg = docker.NewTestDBDocker(t, "postgres")
redisImg = docker.NewTestRedisDocker(t)
// Create a random ws port.
port, _ := rand.Int(rand.Reader, big.NewInt(2000))
@@ -70,6 +72,7 @@ func free(t *testing.T) {
assert.NoError(t, l1gethImg.Stop())
assert.NoError(t, l2gethImg.Stop())
assert.NoError(t, dbImg.Stop())
assert.NoError(t, redisImg.Stop())
// Delete temporary files.
assert.NoError(t, os.Remove(bridgeFile))
@@ -80,7 +83,8 @@ func free(t *testing.T) {
}
type appAPI interface {
RunApp(parallel bool)
WaitResult(timeout time.Duration, keyword string) bool
RunApp(waitResult func() bool)
WaitExit()
ExpectWithTimeout(parallel bool, timeout time.Duration, keyword string)
}
@@ -103,7 +107,7 @@ func runDBCliApp(t *testing.T, option, keyword string) {
// Wait expect result.
app.ExpectWithTimeout(true, time.Second*3, keyword)
app.RunApp(false)
app.RunApp(nil)
}
func runRollerApp(t *testing.T, args ...string) appAPI {
@@ -141,7 +145,10 @@ func mockBridgeConfig(t *testing.T) string {
cfg.L1Config.RelayerConfig.SenderConfig.Endpoint = l2gethImg.Endpoint()
}
if dbImg != nil {
cfg.DBConfig.DSN = dbImg.Endpoint()
cfg.DBConfig.Persistence.DSN = dbImg.Endpoint()
}
if redisImg != nil {
cfg.DBConfig.Redis.URL = redisImg.Endpoint()
}
// Store changed bridge config into a temp file.
@@ -160,11 +167,10 @@ func mockCoordinatorConfig(t *testing.T) string {
cfg.RollerManagerConfig.Verifier.MockMode = true
if dbImg != nil {
cfg.DBConfig.DSN = dbImg.Endpoint()
cfg.DBConfig.Persistence.DSN = dbImg.Endpoint()
}
if l2gethImg != nil {
cfg.L2Config.Endpoint = l2gethImg.Endpoint()
if redisImg != nil {
cfg.DBConfig.Redis.URL = redisImg.Endpoint()
}
data, err := json.Marshal(cfg)
@@ -181,8 +187,12 @@ func mockDatabaseConfig(t *testing.T) string {
cfg, err := database.NewConfig("../../database/config.json")
assert.NoError(t, err)
if dbImg != nil {
cfg.DSN = dbImg.Endpoint()
cfg.Persistence.DSN = dbImg.Endpoint()
}
if redisImg != nil {
cfg.Redis.URL = redisImg.Endpoint()
}
data, err := json.Marshal(cfg)
assert.NoError(t, err)
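
The test setup above now reaches the database settings through cfg.DBConfig.Persistence.DSN and cfg.DBConfig.Redis.URL instead of a flat DSN. A rough sketch of a config shape that would support those accesses, assuming JSON configuration as in the NewConfig("../../database/config.json") call; the JSON tags and extra fields are guesses, only the Go field paths come from the diff:

```go
// Package database sketches the split persistence/cache config referenced by
// the integration tests above (illustrative, not the repository's file).
package database

import (
	"encoding/json"
	"os"
)

// PersistenceConfig holds the Postgres connection settings; only DSN is
// referenced by the tests above.
type PersistenceConfig struct {
	DSN        string `json:"dsn"`
	DriverName string `json:"driverName"`
}

// RedisConfig holds the cache settings; only URL appears in the tests above.
type RedisConfig struct {
	URL string `json:"url"`
}

// Config groups the persistent store and the cache, matching the
// cfg.DBConfig.Persistence / cfg.DBConfig.Redis accesses in the diff.
type Config struct {
	Persistence *PersistenceConfig `json:"persistence"`
	Redis       *RedisConfig       `json:"redis"`
}

// NewConfig reads a JSON config file from disk.
func NewConfig(path string) (*Config, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	cfg := &Config{}
	if err := json.Unmarshal(data, cfg); err != nil {
		return nil, err
	}
	return cfg, nil
}
```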

View File

@@ -3,8 +3,8 @@ module scroll-tech/integration-test
go 1.18
require (
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257
github.com/stretchr/testify v1.8.0
github.com/scroll-tech/go-ethereum v1.10.14-0.20230113082126-cdaea939622e
github.com/stretchr/testify v1.8.1
)
require (
@@ -14,7 +14,7 @@ require (
github.com/kr/pretty v0.3.0 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/rogpeppe/go-internal v1.8.1 // indirect
golang.org/x/crypto v0.4.0 // indirect
golang.org/x/sys v0.3.0 // indirect
golang.org/x/crypto v0.5.0 // indirect
golang.org/x/sys v0.4.0 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

View File

@@ -304,7 +304,7 @@ github.com/rogpeppe/go-internal v1.8.1 h1:geMPLpDpQOgVyCg5z5GoRwLHepNdb71NXb67XF
github.com/rogpeppe/go-internal v1.8.1/go.mod h1:JeRgkft04UBgHMgCIwADu4Pn6Mtm5d4nPKWu0nJ5d+o=
github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
github.com/scroll-tech/go-ethereum v1.10.14-0.20221221073256-5ca70bf3a257 h1:FjBC0Ww42WRoiB5EQFxoIEcJqoEUw2twdhN9nGkVCQA=
github.com/scroll-tech/go-ethereum v1.10.14-0.20230113082126-cdaea939622e h1:TAqAeQiQI6b+TRyqyQ6qhizqY35LhqYe8lWhG0nNRGw=
github.com/scroll-tech/zktrie v0.3.0/go.mod h1:CuJFlG1/soTJJBAySxCZgTF7oPvd5qF6utHOEciC43Q=
github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
github.com/segmentio/kafka-go v0.2.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=
@@ -329,8 +329,8 @@ github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81P
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0 h1:pSgiaMZlXftHpm5L7V1+rVB+AZJydKsMxsQBIJw4PKk=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
github.com/syndtr/goleveldb v1.0.1-0.20210819022825-2ae1ddf74ef7/go.mod h1:q4W45IWZaF22tdD+VEXcAWRA037jwmWEB5VWYORlTpc=
github.com/tinylib/msgp v1.0.2/go.mod h1:+d+yLhGm8mzTaHzB+wgMYrodPfmZrzkirds8fDWklFE=
github.com/tklauser/go-sysconf v0.3.5/go.mod h1:MkWzOF4RMCshBAMXuhXJs64Rte09mITnppBXY/rYEFI=
@@ -363,8 +363,7 @@ golang.org/x/crypto v0.0.0-20200820211705-5c72a883971a/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20201221181555-eec23a3978ad/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20211117183948-ae814b36b871/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.4.0 h1:UVQgzMY87xqpKNgb+kDsll2Igd33HszWHFLmpaRMq/8=
golang.org/x/crypto v0.4.0/go.mod h1:3quD/ATkf6oY+rnes5c3ExXTbLc8mueNue5/DoinL80=
golang.org/x/crypto v0.5.0 h1:U/0M97KRkSFvyD/3FSmdP5W5swImpNgle/EHFhOsQPE=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -475,8 +474,7 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.3.0 h1:w8ZOecv6NaNa/zC8944JTU3vz4u6Lagfk4RPQxv92NQ=
golang.org/x/sys v0.3.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.4.0 h1:Zr2JFtRQNX3BCZ8YtxRE9hNJYC8J6I1MVbMg6owUp18=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=

View File

@@ -28,19 +28,16 @@ func testStartProcess(t *testing.T) {
// Start bridge process.
bridgeCmd := runBridgeApp(t)
bridgeCmd.ExpectWithTimeout(true, time.Second*10, "Start bridge successfully")
bridgeCmd.RunApp(true)
bridgeCmd.RunApp(func() bool { return bridgeCmd.WaitResult(time.Second*20, "Start bridge successfully") })
// Start coordinator process.
coordinatorCmd := runCoordinatorApp(t, "--ws", "--ws.port", "8391")
coordinatorCmd.ExpectWithTimeout(true, time.Second*10, "Start coordinator successfully")
coordinatorCmd.RunApp(true)
coordinatorCmd.RunApp(func() bool { return coordinatorCmd.WaitResult(time.Second*20, "Start coordinator successfully") })
// Start roller process.
rollerCmd := runRollerApp(t)
rollerCmd.ExpectWithTimeout(true, time.Second*20, "roller start successfully")
rollerCmd.ExpectWithTimeout(true, time.Second*30, "register to coordinator successfully!")
rollerCmd.RunApp(true)
rollerCmd.ExpectWithTimeout(true, time.Second*60, "register to coordinator successfully!")
rollerCmd.RunApp(func() bool { return rollerCmd.WaitResult(time.Second*40, "roller start successfully") })
rollerCmd.WaitExit()
bridgeCmd.WaitExit()
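
The process tests above switch RunApp from a parallel flag to an optional waitResult callback, pairing it with the new WaitResult method on the appAPI interface. A toy sketch of that callback style; fakeApp and its behaviour are illustrative, only the method signatures follow the interface shown earlier:

```go
package main

import (
	"fmt"
	"time"
)

// fakeApp is a toy stand-in for a test application handle implementing the
// relevant part of appAPI.
type fakeApp struct {
	name string
}

// WaitResult would normally scan the captured process output for keyword
// until the timeout expires; here it simply reports success.
func (a *fakeApp) WaitResult(timeout time.Duration, keyword string) bool {
	fmt.Printf("%s: pretending %q appeared within %s\n", a.name, keyword, timeout)
	return true
}

// RunApp starts the process and, when waitResult is non-nil, blocks until the
// callback confirms the expected startup log line (or reports a timeout).
func (a *fakeApp) RunApp(waitResult func() bool) {
	fmt.Println("starting", a.name)
	if waitResult != nil && !waitResult() {
		fmt.Println(a.name, "did not report a healthy start in time")
	}
}

func main() {
	bridge := &fakeApp{name: "bridge"}
	bridge.RunApp(func() bool {
		return bridge.WaitResult(20*time.Second, "Start bridge successfully")
	})
}
```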